diff --git a/.circleci/config.yml b/.circleci/config.yml index 7ad793549..be2d9690b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -26,17 +26,12 @@ jobs: - checkout - restore_cache: keys: - - v3-pkg-cache + - v4-pkg-cache - run: name: tools command: | export PATH="$GOBIN:$PATH" make get_tools - - run: - name: dependencies - command: | - export PATH="$GOBIN:$PATH" - make get_vendor_deps - run: name: binaries command: | @@ -48,7 +43,7 @@ jobs: - bin - profiles - save_cache: - key: v3-pkg-cache + key: v4-pkg-cache paths: - /go/pkg - save_cache: @@ -62,7 +57,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -72,35 +67,13 @@ jobs: export PATH="$GOBIN:$PATH" make build-slate - lint: - <<: *defaults - steps: - - attach_workspace: - at: /tmp/workspace - - restore_cache: - key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - - run: - name: metalinter - command: | - set -ex - export PATH="$GOBIN:$PATH" - make lint - - run: - name: check_dep - command: | - set -ex - export PATH="$GOBIN:$PATH" - make check_dep - test_abci_apps: <<: *defaults steps: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -109,15 +82,15 @@ jobs: export PATH="$GOBIN:$PATH" bash abci/tests/test_app/test.sh -# if this test fails, fix it and update the docs at: -# https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md + # if this test fails, fix it and update the docs at: + # https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md test_abci_cli: <<: *defaults steps: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -132,7 +105,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils @@ -147,7 +120,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: mkdir -p /tmp/logs @@ -157,7 +130,7 @@ jobs: for pkg in $(go list github.com/tendermint/tendermint/... 
| circleci tests split --split-by=timings); do id=$(basename "$pkg") - go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" + GO111MODULE=on go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" done - persist_to_workspace: root: /tmp/workspace @@ -172,7 +145,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -180,24 +153,24 @@ jobs: command: bash test/persist/test_failure_indices.sh localnet: - working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint - machine: - image: circleci/classic:latest - environment: - GOBIN: /home/circleci/.go_workspace/bin - GOPATH: /home/circleci/.go_workspace/ - GOOS: linux - GOARCH: amd64 - parallelism: 1 - steps: - - checkout - - run: - name: run localnet and exit on failure - command: | - set -x - docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux - make localnet-start & - ./scripts/localnet-blocks-test.sh 40 5 10 localhost + working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint + machine: + image: circleci/classic:latest + environment: + GOBIN: /home/circleci/.go_workspace/bin + GOPATH: /home/circleci/.go_workspace/ + GOOS: linux + GOARCH: amd64 + parallelism: 1 + steps: + - checkout + - run: + name: run localnet and exit on failure + command: | + set -x + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux + make localnet-start & + ./scripts/localnet-blocks-test.sh 40 5 10 localhost test_p2p: environment: @@ -219,7 +192,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -243,11 +216,11 @@ jobs: name: Trigger website build command: | curl --silent \ - --show-error \ - -X POST \ - --header "Content-Type: application/json" \ - -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \ - "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json + --show-error \ + -X POST \ + --header "Content-Type: application/json" \ + -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \ + "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json RESULT=`jq -r '.status' response.json` MESSAGE=`jq -r '.message' response.json` @@ -278,15 +251,15 @@ jobs: - run: name: Build dependencies command: | - make get_tools get_vendor_deps + make get_tools - persist_to_workspace: root: . 
paths: - "release-version.source" - save_cache: - key: v1-release-deps-{{ .Branch }}-{{ .Revision }} + key: v2-release-deps-{{ checksum "go.sum" }} paths: - - "vendor" + - "/go/pkg/mod" build_artifacts: <<: *defaults @@ -295,7 +268,7 @@ jobs: - checkout - restore_cache: keys: - - v1-release-deps-{{ .Branch }}-{{ .Revision }} + - v2-release-deps-{{ checksum "go.sum" }} - attach_workspace: at: /tmp/workspace - run: @@ -358,6 +331,63 @@ jobs: docker push "tendermint/tendermint" docker logout + reproducible_builds: + <<: *defaults + steps: + - attach_workspace: + at: /tmp/workspace + - checkout + - setup_remote_docker: + docker_layer_caching: true + - run: + name: Build tendermint + no_output_timeout: 20m + command: | + sudo apt-get install -y ruby + bash -x ./scripts/gitian-build.sh all + for os in darwin linux windows; do + cp gitian-build-${os}/result/tendermint-${os}-res.yml . + cp gitian-build-${os}/build/out/tendermint-*.tar.gz . + rm -rf gitian-build-${os}/ + done + - store_artifacts: + path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml + - store_artifacts: + path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml + - store_artifacts: + path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml + - store_artifacts: + path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz + + # Test RPC implementation against the swagger documented specs + contract_tests: + working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint + machine: + image: circleci/classic:latest + environment: + GOBIN: /home/circleci/.go_workspace/bin + GOPATH: /home/circleci/.go_workspace/ + GOOS: linux + GOARCH: amd64 + parallelism: 1 + steps: + - checkout + - run: + name: Test RPC endpoints against swagger documentation + command: | + set -x + export PATH=~/.local/bin:$PATH + + # install node and dredd + ./scripts/get_nodejs.sh + + # build the binaries with a proper version of Go + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks + + # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use + go get github.com/snikch/goodman/cmd/goodman + make contract-tests + workflows: version: 2 test-suite: @@ -367,11 +397,7 @@ workflows: branches: only: - master - - develop - setup_dependencies - - lint: - requires: - - setup_dependencies - test_abci_apps: requires: - setup_dependencies @@ -394,6 +420,16 @@ workflows: - upload_coverage: requires: - test_cover + - reproducible_builds: + filters: + branches: + only: + - master + - /v[0-9]+\.[0-9]+/ + - contract_tests: + requires: + - setup_dependencies + release: jobs: - prepare_build diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d4f55392b..e1863c783 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,12 @@ - + + +* [ ] Referenced an issue explaining the need for the change * [ ] Updated all relevant documentation in docs * [ ] Updated all code comments where relevant * [ ] Wrote tests diff --git a/.gitignore b/.gitignore index 10ee3099c..9e2e5a9ea 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,5 @@ terraform.tfstate.backup terraform.tfstate.d .vscode + +profile\.out diff --git a/.golangci.yml b/.golangci.yml index a051e1a45..17d575316 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,7 +8,6 @@ linters: - golint - maligned - errcheck - - 
staticcheck
   - interfacer
   - unconvert
   - goconst
@@ -16,11 +15,9 @@
   - nakedret
   - lll
   - gochecknoglobals
-  - gocritic
   - gochecknoinits
   - scopelint
   - stylecheck
-
 # linters-settings:
 #   govet:
 #     check-shadowing: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 195b44da7..738a437de 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,260 @@
 # Changelog

+## v0.32.3-binance.1
+
+### IMPROVEMENTS:
+
+- [Performance] [\#110](https://github.com/binance-chain/bnc-tendermint/pull/110) Improve performance
+
+### BUG FIXES:
+
+- [index] [\#129](https://github.com/binance-chain/bnc-tendermint/pull/129) fix tx indexer lagging behind blocks
+
+## v0.32.3
+
+*August 28, 2019*
+
+@climber73 wrote the [Writing a Tendermint Core application in Java
+(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
+guide.
+
+Special thanks to external contributors on this release:
+@gchaincl, @bluele, @climber73
+
+Friendly reminder, we have a [bug bounty
+program](https://hackerone.com/tendermint).
+
+### IMPROVEMENTS:
+
+- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
+- [mempool] [\#3877](https://github.com/tendermint/tendermint/pull/3877) Make `max_tx_bytes` configurable instead of `max_msg_bytes` (@bluele)
+- [privval] [\#3370](https://github.com/tendermint/tendermint/issues/3370) Refactor and simplify validator/kms connection handling. Please refer to [this comment](https://github.com/tendermint/tendermint/pull/3370#issue-257360971) for details
+- [rpc] [\#3880](https://github.com/tendermint/tendermint/issues/3880) Document endpoints with `swagger`, introduce contract tests of implementation against documentation
+
+### BUG FIXES:
+
+- [config] [\#3868](https://github.com/tendermint/tendermint/issues/3868) Move misplaced `max_msg_bytes` into mempool section (@bluele)
+- [rpc] [\#3910](https://github.com/tendermint/tendermint/pull/3910) Fix DATA RACE in HTTP client (@gchaincl)
+- [store] [\#3893](https://github.com/tendermint/tendermint/issues/3893) Fix "Unregistered interface types.Evidence" panic
+
+## v0.32.2
+
+*July 31, 2019*
+
+Special thanks to external contributors on this release:
+@ruseinov, @bluele, @guagualvcha
+
+Friendly reminder, we have a [bug bounty
+program](https://hackerone.com/tendermint).
+
+### BREAKING CHANGES:
+
+- Go API
+  - [libs] [\#3811](https://github.com/tendermint/tendermint/issues/3811) Remove `db` from libs in favor of `https://github.com/tendermint/tm-db`
+
+### FEATURES:
+
+- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303). NOTE: It's not ready for production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
+- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable (@bluele)
+- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
+  option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
+  Warning: beware of accidental name clashes. Here is the list of existing
+  reactors: MEMPOOL, BLOCKCHAIN, CONSENSUS, EVIDENCE, PEX.
+- [rpc] [\#3818](https://github.com/tendermint/tendermint/issues/3818) Make `max_body_bytes` and `max_header_bytes` configurable (@bluele)
+- [rpc] [\#2252](https://github.com/tendermint/tendermint/issues/2252) Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence
+
+### IMPROVEMENTS:
+
+- [abci] [\#3809](https://github.com/tendermint/tendermint/issues/3809) Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
+- [p2p] [\#3664](https://github.com/tendermint/tendermint/issues/3664) p2p/conn: reuse buffer when writing/reading from the secret connection (@guagualvcha)
+- [p2p] [\#3834](https://github.com/tendermint/tendermint/issues/3834) Do not write 'Couldn't connect to any seeds' error log if there are no seeds in config file
+- [rpc] [\#3076](https://github.com/tendermint/tendermint/issues/3076) Improve transaction search performance
+
+### BUG FIXES:
+
+- [p2p] [\#3644](https://github.com/tendermint/tendermint/issues/3644) Fix error logging for connection stop (@defunctzombie)
+- [rpc] [\#3813](https://github.com/tendermint/tendermint/issues/3813) Return err if page is incorrect (less than 0 or greater than total pages)
+
+## v0.32.1
+
+*July 15, 2019*
+
+Special thanks to external contributors on this release:
+@ParthDesai, @climber73, @jim380, @ashleyvega
+
+This release contains a minor enhancement to the ABCI and some breaking changes to our libs folder, namely:
+- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
+- Removed various functions from `libs` pkgs
+
+Friendly reminder, we have a [bug bounty
+program](https://hackerone.com/tendermint).
+
+### BREAKING CHANGES:
+
+- Go API
+
+  - [abci] [\#2127](https://github.com/tendermint/tendermint/issues/2127) The CheckTx and DeliverTx methods in the ABCI `Application` interface now take structs as arguments (RequestCheckTx and RequestDeliverTx, respectively), instead of just the raw tx bytes. This allows more information to be passed to these methods, for instance, indicating whether a tx has already been checked.
+  - [libs] Remove unused `db/debugDB` and `common/colors.go` & `errors/errors.go` files (@marbar3778)
+  - [libs] [\#2432](https://github.com/tendermint/tendermint/issues/2432) Remove unused `common/heap.go` file (@marbar3778)
+  - [libs] Remove unused `date.go`, `io.go`. Remove `GoPath()`, `Prompt()` and `IsDirEmpty()` functions from `os.go` (@marbar3778)
+  - [libs] Remove unused `FailRand()` func and minor clean up to `fail.go` (@marbar3778)
+
+### FEATURES:
+
+- [node] Add variadic argument to `NewNode` to support functional options, allowing the Node to be more easily customized.
+- [node] [\#3730](https://github.com/tendermint/tendermint/pull/3730) Add `CustomReactors` option to `NewNode` allowing caller to pass
+  custom reactors to run inside Tendermint node (@ParthDesai)
+- [abci] [\#2127](https://github.com/tendermint/tendermint/issues/2127) RequestCheckTx has a new field, `CheckTxType`, which can take values of `CheckTxType_New` and `CheckTxType_Recheck`, indicating whether this is a new tx being checked for the first time or whether this tx is being rechecked after a block commit.
+  This allows applications to skip certain expensive operations, like signature checking, if they've already been done once. See the [docs](https://github.com/tendermint/tendermint/blob/eddb433d7c082efbeaf8974413a36641519ee895/docs/spec/abci/apps.md#mempool-connection).
+
+### IMPROVEMENTS:
+
+- [rpc] [\#3700](https://github.com/tendermint/tendermint/issues/3700) Make it possible to set absolute paths for TLS cert and key (@climber73)
+- [abci] [\#3513](https://github.com/tendermint/tendermint/issues/3513) Call the reqRes callback after the resCb so they always happen in the same order
+
+### BUG FIXES:
+
+- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Prevent "sent next PEX request too soon" errors by not calling
+  ensurePeers outside of ensurePeersRoutine
+- [behaviour] [\#3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
+- [config] [\#3723](https://github.com/tendermint/tendermint/issues/3723) Add consensus_params to testnet config generation; document time_iota_ms (@ashleyvega)
+
+
+## v0.32.0
+
+*June 25, 2019*
+
+Special thanks to external contributors on this release:
+@needkane, @SebastianElvis, @andynog, @Yawning, @wooparadog
+
+This release contains breaking changes to our build and release processes, ABCI,
+and the RPC, namely:
+- Use Go modules instead of dep
+- Bring active development to the `master` Github branch
+- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)
+
+Friendly reminder, we have a [bug bounty
+program](https://hackerone.com/tendermint).
+
+### BREAKING CHANGES:
+
+* CLI/RPC/Config
+  - [cli] [\#3613](https://github.com/tendermint/tendermint/issues/3613) Switch from golang/dep to Go Modules to resolve dependencies:
+    It is recommended to switch to Go Modules if your project has tendermint as
+    a dependency. Read more on Modules here:
+    https://github.com/golang/go/wiki/Modules
+  - [config] [\#3632](https://github.com/tendermint/tendermint/pull/3632) Removed `leveldb` as generic
+    option for `db_backend`. Must be `goleveldb` or `cleveldb`.
+  - [rpc] [\#3616](https://github.com/tendermint/tendermint/issues/3616) Fix field names for `/block_results` response (eg. `results.DeliverTx`
+    -> `results.deliver_tx`). See docs for details.
+
+* Go API
+  - [abci] [\#3193](https://github.com/tendermint/tendermint/issues/3193) Use RequestDeliverTx and RequestCheckTx in the ABCI
+    Application interface
+  - [p2p] [\#3521](https://github.com/tendermint/tendermint/issues/3521) Remove NewNetAddressStringWithOptionalID
+
+* Blockchain Protocol
+
+* P2P Protocol
+
+### FEATURES:
+
+### IMPROVEMENTS:
+- [abci/examples] [\#3659](https://github.com/tendermint/tendermint/issues/3659) Change validator update tx format in the `persistent_kvstore` to use base64 for pubkeys instead of hex (@needkane)
+- [consensus] [\#3656](https://github.com/tendermint/tendermint/issues/3656) Exit if SwitchToConsensus fails
+- [p2p] [\#3666](https://github.com/tendermint/tendermint/issues/3666) Add per channel telemetry to improve reactor observability
+- [rpc] [\#3686](https://github.com/tendermint/tendermint/pull/3686) `HTTPClient#Call` returns wrapped errors, so a caller could use `errors.Cause` to retrieve an error code.
+  (@wooparadog)
+
+### BUG FIXES:
+- [libs/db] [\#3717](https://github.com/tendermint/tendermint/issues/3717) Fixed the BoltDB backend's Batch.Delete implementation (@Yawning)
+- [libs/db] [\#3718](https://github.com/tendermint/tendermint/issues/3718) Fixed the BoltDB backend's Get and Iterator implementation (@Yawning)
+- [node] [\#3716](https://github.com/tendermint/tendermint/issues/3716) Fix a bug where `nil` is recorded as node's address
+- [node] [\#3741](https://github.com/tendermint/tendermint/issues/3741) Fix profiler blocking the entire node
+
+## v0.31.7
+
+*June 3, 2019*
+
+This release fixes a regression in the mempool introduced in v0.31.6.
+The regression caused invalid committed txs to be proposed in blocks over and
+over again.
+
+### BUG FIXES:
+- [mempool] [\#3699](https://github.com/tendermint/tendermint/issues/3699) Remove all committed txs from the mempool.
+  This reverts the change from v0.31.6 where we only remove valid txs from the mempool.
+  Note this means malicious proposals can cause txs to be dropped from the
+  mempools of other nodes by including them in blocks before they are valid.
+  See [\#3322](https://github.com/tendermint/tendermint/issues/3322).
+
+## v0.31.6
+
+*May 31st, 2019*
+
+This release contains many fixes and improvements, primarily for p2p functionality.
+It also fixes a security issue in the mempool package.
+
+With this release, Tendermint now supports [boltdb](https://github.com/etcd-io/bbolt), although
+in experimental mode. Feel free to try it and report any findings/issues to us.
+Note also that the build tags for compiling CLevelDB have changed.
+
+Special thanks to external contributors on this release:
+@guagualvcha, @james-ray, @gregdhill, @climber73, @yutianwu,
+@carlosflrs, @defunctzombie, @leoluk, @needkane, @CrocdileChan
+
+### BREAKING CHANGES:
+
+* Go API
+  - [libs/common] Removed deprecated `PanicSanity`, `PanicCrisis`,
+    `PanicConsensus` and `PanicQ`
+  - [mempool, state] [\#2659](https://github.com/tendermint/tendermint/issues/2659) `Mempool` is now an interface that lives in the mempool package.
+    See issue and PR for more details.
+  - [p2p] [\#3346](https://github.com/tendermint/tendermint/issues/3346) `Reactor#InitPeer` method is added to `Reactor` interface
+  - [types] [\#1648](https://github.com/tendermint/tendermint/issues/1648) `Commit#VoteSignBytes` signature was changed
+
+### FEATURES:
+- [node] [\#2659](https://github.com/tendermint/tendermint/issues/2659) Add `node.Mempool()` method, which allows you to access mempool
+- [libs/db] [\#3604](https://github.com/tendermint/tendermint/pull/3604) Add experimental support for bolt db (etcd's fork of bolt) (@CrocdileChan)
+
+### IMPROVEMENTS:
+- [cli] [\#3585](https://github.com/tendermint/tendermint/issues/3585) Add `--keep-addr-book` option to `unsafe_reset_all` cmd to not
+  clear the address book (@climber73)
+- [cli] [\#3160](https://github.com/tendermint/tendermint/issues/3160) Add
+  `--config=<path-to-config>` option to `testnet` cmd (@gregdhill)
+- [cli] [\#3661](https://github.com/tendermint/tendermint/pull/3661) Add
+  `--hostname-suffix`, `--hostname` and `--random-monikers` options to `testnet`
+  cmd for greater peer address/identity generation flexibility.
+- [crypto] [\#3672](https://github.com/tendermint/tendermint/issues/3672) Return more info in the `AddSignatureFromPubKey` error
+- [cs/replay] [\#3460](https://github.com/tendermint/tendermint/issues/3460) Check appHash for each block
+- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
+  * Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
+    use `make build_c` / `make install_c` (full instructions can be found at
+    https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
+  * Use `boltdb` tag to compile Tendermint with bolt db
+- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
+  when IP lookup fails)
+- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer (@guagualvcha)
+- [p2p] [\#3531](https://github.com/tendermint/tendermint/issues/3531) Terminate session on nonce wrapping (@climber73)
+- [pex] [\#3647](https://github.com/tendermint/tendermint/pull/3647) Dial seeds, if any, instead of crawling peers first (@defunctzombie)
+- [rpc] [\#3534](https://github.com/tendermint/tendermint/pull/3534) Add support for batched requests/responses in JSON RPC
+- [rpc] [\#3362](https://github.com/tendermint/tendermint/issues/3362) `/dial_seeds` & `/dial_peers` return errors if addresses are
+  incorrect (except when IP lookup fails)
+
+### BUG FIXES:
+- [consensus] [\#3067](https://github.com/tendermint/tendermint/issues/3067) Fix replay from appHeight==0 with validator set changes (@james-ray)
+- [consensus] [\#3304](https://github.com/tendermint/tendermint/issues/3304) Create a peer state in consensus reactor before the peer
+  is started (@guagualvcha)
+- [lite] [\#3669](https://github.com/tendermint/tendermint/issues/3669) Add context parameter to RPC Handlers in proxy routes (@yutianwu)
+- [mempool] [\#3322](https://github.com/tendermint/tendermint/issues/3322) When a block is committed, only remove committed txs from the mempool
+that were valid (ie. `ResponseDeliverTx.Code == 0`)
+- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Ensure `RemovePeer` is always called before `InitPeer` (upon a peer
+  reconnecting to our node)
+- [p2p] [\#3532](https://github.com/tendermint/tendermint/issues/3532) Limit the number of attempts to connect to a peer in seed mode
+  to 16 (as a result, the node will stop retrying after a 35-hour time window)
+- [p2p] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Allow inbound peers to be persistent, including for seed nodes.
+- [pex] [\#3603](https://github.com/tendermint/tendermint/pull/3603) Dial seeds when addrbook needs more addresses (@defunctzombie)
+
+### OTHERS:
+- [networks] fixes the ansible integration script (@carlosflrs)
+
 ## v0.31.5-binance.3
 *Oct 12th, 2019*
 ### Bugfix:
@@ -25,6 +280,7 @@ accepting new peers and only allowing `ed25519` pubkeys.
 - [index] [\#106](https://github.com/binance-chain/bnc-tendermint/pull/106) index service recovers from data loss
 - [P2P] [\#107](https://github.com/binance-chain/bnc-tendermint/pull/107) introduce skip_tx_from_persistent config and other basic p2p improvement
+
 ## v0.31.5-binance.1
 *July 17th, 2019*
- [mempool] [\#100](https://github.com/binance-chain/bnc-tendermint/pull/100) add OnlyPersistent to config of mempool
- [metrics] [\#96](https://github.com/binance-chain/bnc-tendermint/pull/96) monitor: add more metrics about p2p
+
 ## v0.31.5

 *April 16th, 2019*
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index aaa75891b..9d659f84f 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -1,18 +1,13 @@
 ## develop

-**
+\*\*

-### BREAKING CHANGES:
-
-* CLI/RPC/Config
-
-* Apps
+Special thanks to external contributors on this release:

-* Go API
+Friendly reminder, we have a [bug bounty
+program](https://hackerone.com/tendermint).

-* Blockchain Protocol
-
-* P2P Protocol
+### BREAKING CHANGES:

 ### FEATURES:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3dab3b8ab..1b9ea4409 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,24 +2,35 @@
 Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.

-Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
+Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`.
+See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!

-Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file.
+Before making a pull request, please open an issue describing the
+change you would like to make. If an issue for your change already exists,
+please comment on it that you will submit a pull request. Be sure to reference the issue in the opening
+comment of your pull request. If your change is substantial, you will be asked
+to write a more detailed design document in the form of an
+Architectural Decision Record (ie. see [here](./docs/architecture/)) before submitting code
+changes.
+
+Please open a [Draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/), even if your contribution is incomplete; this indicates to the community that you're working on something and allows them to provide comments early in the development process. When the code is complete it can be marked as ready-for-review.
+
+Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file. Additionally, please ensure that your code is lint compliant by running `make lint`.

 ## Forking

 Please note that Go requires code to live under absolute paths, which complicates forking.
 While my fork lives at `https://github.com/ebuchman/tendermint`,
-the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
+the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
 Instead, we use `git remote` to add the fork as a new remote for the original repo,
-`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there.
+`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there.

 For instance, to create a fork and work on a branch of it, I would:

-  * Create the fork on github, using the fork button.
-  * Go to the original repo checked out locally (i.e.
     `$GOPATH/src/github.com/tendermint/tendermint`)
-  * `git remote rename origin upstream`
-  * `git remote add origin git@github.com:ebuchman/basecoin.git`
+- Create the fork on github, using the fork button.
+- Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
+- `git remote rename origin upstream`
+- `git remote add origin git@github.com:ebuchman/basecoin.git`

 Now `origin` refers to my fork and `upstream` refers to the tendermint version.
 So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
@@ -27,14 +38,12 @@ Of course, replace `ebuchman` with your git handle.

 To pull in updates from the origin repo, run

-  * `git fetch upstream`
-  * `git rebase upstream/master` (or whatever branch you want)
-
-Please don't make Pull Requests to `master`.
+- `git fetch upstream`
+- `git rebase upstream/master` (or whatever branch you want)

 ## Dependencies

-We use [dep](https://github.com/golang/dep) to manage dependencies.
+We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.

 That said, the master branch of every Tendermint repository should just build
 with `go get`, which means they should be kept up-to-date with their
@@ -42,18 +51,17 @@ dependencies so we can get away with telling people they can just `go get` our
 software.

 Since some dependencies are not under our control, a third party may break our
-build, in which case we can fall back on `dep ensure` (or `make
-get_vendor_deps`). Even for dependencies under our control, dep helps us to
+build, in which case we can fall back on `go mod tidy`. Even for dependencies under our control, Go modules help us to
 keep multiple repos in sync as they evolve.

 Anything with an executable, such as apps, tools, and the core, should use go modules.

-Run `dep status` to get a list of vendor dependencies that may not be
+Run `go list -u -m all` to get a list of dependencies that may not be
 up-to-date.

 When updating dependencies, please only update the particular dependencies you
-need. Instead of running `dep ensure -update`, which will update anything,
+need. Instead of running `go get -u=patch`, which will update anything,
 specify exactly the dependency you want to update, eg.
-`dep ensure -update github.com/tendermint/go-amino`.
+`GO111MODULE=on go get -u github.com/tendermint/go-amino@master` (see the sketch below).

 ## Vagrant

@@ -105,53 +113,66 @@ removed from the header in rpc responses as well.

 ## Branching Model and Release

-All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
-This means that all pull-requests should be made against develop. Any merge to
-master constitutes a tagged release.
+The main development branch is master.
+
+Every release is maintained in a release branch named `vX.Y.Z`.
+
+Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
+easy to reference the pull request where a change was introduced.
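To make the dependency workflow above concrete, here is a minimal sketch of the commands the updated CONTRIBUTING text describes. The module path `github.com/tendermint/go-amino` is taken from the document; everything else is standard Go module tooling rather than anything project-specific:

```bash
# List every module dependency, flagging those with newer versions available.
GO111MODULE=on go list -u -m all

# Update exactly one dependency to a branch or tag, then tidy go.mod/go.sum.
GO111MODULE=on go get -u github.com/tendermint/go-amino@master
go mod tidy

# Confirm the build still works without modifying the recorded module graph.
go build -mod=readonly ./cmd/tendermint/
```

Committing the resulting `go.mod`/`go.sum` changes also keeps the CI caches introduced earlier in this diff (now keyed on `checksum "go.sum"`) consistent.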
-### Development Procedure:
-- the latest state of development is on `develop`
-- `develop` must never fail `make test`
-- never --force onto `develop` (except when reverting a broken commit, which should seldom happen)
+### Development Procedure
+
+- the latest state of development is on `master`
+- `master` must never fail `make test`
+- never --force onto `master` (except when reverting a broken commit, which should seldom happen)
 - create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
 - make changes and update the `CHANGELOG_PENDING.md` to record your change
-- before submitting a pull request, run `git rebase` on top of the latest `develop`
+- before submitting a pull request, run `git rebase` on top of the latest `master`
+
+### Pull Merge Procedure

-### Pull Merge Procedure:
-- ensure pull branch is based on a recent develop
+- ensure pull branch is based on a recent `master`
 - run `make test` to ensure that all tests pass
-- merge pull request
+- squash merge pull request
 - the `unstable` branch may be used to aggregate pull merges before fixing tests

-### Release Procedure:
-- start on `develop`
-- run integration tests (see `test_integrations` in Makefile)
-- prepare changelog:
-  - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
-  - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
-    all issues
-  - run `bash ./scripts/authors.sh` to get a list of authors since the latest
-    release, and add the github aliases of external contributors to the top of
-    the changelog. To look up an alias from an email, try `bash
-    ./scripts/authors.sh <email>`
-  - reset the `CHANGELOG_PENDING.md`
-- bump versions
-- push to release/vX.X.X to run the extended integration tests on the CI
-- merge to master
-- merge master back to develop
-
-### Hotfix Procedure:
-- start on `master`
-- checkout a new branch named hotfix-vX.X.X
-- make the required changes
-  - these changes should be small and an absolute necessity
-  - add a note to CHANGELOG.md
-- bump versions
-- push to hotfix-vX.X.X to run the extended integration tests on the CI
-- merge hotfix-vX.X.X to master
-- merge hotfix-vX.X.X to develop
-- delete the hotfix-vX.X.X branch
+### Release Procedure
+
+#### Major Release
+
+1. start on `master`
+2. run integration tests (see `test_integrations` in Makefile)
+3. prepare release in a pull request against `master` (to be squash merged):
+   - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
+   - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
+     all issues
+   - run `bash ./scripts/authors.sh` to get a list of authors since the latest
+     release, and add the github aliases of external contributors to the top of
+     the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
+   - reset the `CHANGELOG_PENDING.md`
+   - bump versions
+4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`)
+5. merge back to master (don't squash merge!)
+
+#### Minor Release
+
+If there were no breaking changes and you need to create a release nonetheless,
+the procedure is almost exactly the same as for a new release above.
+
+The only difference is that in the end you create a pull request against the existing `X.X` branch.
+The branch name should match the release number you want to create.
+Merging this PR will trigger the next release.
+For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag,
+the patch version will be incremented and the created release will be v0.34.1.
+
+#### Backport Release
+
+1. start from the existing release branch you want to backport changes to (e.g. v0.30)
+   Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
+2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
+3. steps 2 and 3 from [Major Release](#major-release)
+4. push changes to release/vX.X.X branch
+5. open a PR against the existing vX.X branch

 ## Testing

@@ -161,3 +182,16 @@ If they have `.go` files in the root directory, they will be automatically
 tested by circle using `go test -v -race ./...`. If not, they will need a
 `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test`
 and includes its continuous integration status using a badge in the `README.md`.
+
+### RPC Testing
+
+If you contribute to the RPC endpoints, it's important to document your changes in the [Swagger file](./docs/spec/rpc/swagger.yaml).
+To test your changes you should install `nodejs` and run:
+
+```bash
+npm i -g dredd
+make build-linux build-contract-tests-hooks
+make contract-tests
+```
+
+This command will spin up a network and check every endpoint against what has been documented.
diff --git a/DOCKER/Dockerfile.develop b/DOCKER/Dockerfile.develop
deleted file mode 100644
index 5759e7658..000000000
--- a/DOCKER/Dockerfile.develop
+++ /dev/null
@@ -1,35 +0,0 @@
-FROM alpine:3.7
-
-ENV DATA_ROOT /tendermint
-ENV TMHOME $DATA_ROOT
-
-RUN addgroup tmuser && \
-    adduser -S -G tmuser tmuser
-
-RUN mkdir -p $DATA_ROOT && \
-    chown -R tmuser:tmuser $DATA_ROOT
-
-RUN apk add --no-cache bash curl jq
-
-ENV GOPATH /go
-ENV PATH "$PATH:/go/bin"
-RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
-    apk add --no-cache go build-base git && \
-    cd /go/src/github.com/tendermint/tendermint && \
-    git clone https://github.com/tendermint/tendermint . && \
-    git checkout develop && \
-    make get_tools && \
-    make get_vendor_deps && \
-    make install && \
-    cd - && \
-    rm -rf /go/src/github.com/tendermint/tendermint && \
-    apk del go build-base git
-
-VOLUME $DATA_ROOT
-
-EXPOSE 26656
-EXPOSE 26657
-
-ENTRYPOINT ["tendermint"]
-
-CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"]
diff --git a/DOCKER/Dockerfile.testing b/DOCKER/Dockerfile.testing
index b82afe2a8..a658aeb10 100644
--- a/DOCKER/Dockerfile.testing
+++ b/DOCKER/Dockerfile.testing
@@ -1,4 +1,4 @@
-FROM golang:1.10.1
+FROM golang:1.12

 # Grab deps (jq, hexdump, xxd, killall)
diff --git a/DOCKER/README.md b/DOCKER/README.md
index 43edce0fc..57e631aaa 100644
--- a/DOCKER/README.md
+++ b/DOCKER/README.md
@@ -12,28 +12,25 @@
 - `0.9.1`, `0.9`, [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
 - `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
 - `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
-- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)
-
-`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
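Stepping back to the RPC Testing section added to CONTRIBUTING.md above: it pairs with the `contract_tests` CircleCI job earlier in this diff. Here is a sketch of the equivalent local flow, assembled only from commands that appear elsewhere in this change set:

```bash
# Install dredd, the tool that checks the RPC endpoints against the
# swagger documentation (docs/spec/rpc/swagger.yaml).
npm i -g dredd

# Install the goodman hook handler that the contract tests use.
go get github.com/snikch/goodman/cmd/goodman

# Build the node binary and the dredd hooks, then run the contract tests;
# the make target starts and stops the localnet itself.
make build-linux build-contract-tests-hooks
make contract-tests
```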
## Quick reference

-* **Where to get help:**
-  https://cosmos.network/community
+- **Where to get help:**
+  [cosmos.network/ecosystem](https://cosmos.network/ecosystem)

-* **Where to file issues:**
-  https://github.com/tendermint/tendermint/issues
+- **Where to file issues:**
+  [Tendermint Issues](https://github.com/tendermint/tendermint/issues)

-* **Supported Docker versions:**
+- **Supported Docker versions:**
   [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)

 ## Tendermint

 Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.

-For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html).
+For more background, see [the docs](https://tendermint.com/docs/introduction/#quick-start).

-To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html).
+To get started developing applications, see the [application developers guide](https://tendermint.com/docs/introduction/quick-start.html).

 ## How to use this image

@@ -48,7 +45,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app

 ## Local cluster

-To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
+To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:

 ```
 make build-linux
@@ -60,7 +57,7 @@ Note that this will build and use a different image than the ones provided here.

 ## License

-- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
+- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).

 ## Contributing
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index ca02eac9e..000000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,563 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
- - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:093bf93a65962e8191e3e8cd8fc6c363f83d43caca9739c906531ba7210a9904" - name = "github.com/btcsuite/btcd" - packages = ["btcec"] - pruneopts = "UT" - revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d" - -[[projects]] - digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf" - name = "github.com/btcsuite/btcutil" - packages = [ - "base58", - "bech32", - ] - pruneopts = "UT" - revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70" - name = "github.com/fortytw2/leaktest" - packages = ["."] - pruneopts = "UT" - revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" - version = "v1.2.0" - -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "github.com/fsnotify/fsnotify" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - -[[projects]] - digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11" - name = "github.com/go-kit/kit" - packages = [ - "log", - "log/level", - "log/term", - "metrics", - "metrics/discard", - "metrics/internal/lv", - "metrics/prometheus", - ] - pruneopts = "UT" - revision = "4dc7be5d2d12881735283bcab7352178e190fc71" - version = "v0.6.0" - -[[projects]] - digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659" - name = "github.com/go-logfmt/logfmt" - packages = ["."] - pruneopts = "UT" - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" - -[[projects]] - digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" - name = "github.com/go-stack/stack" - packages = ["."] - pruneopts = "UT" - revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" - version = "v1.8.0" - -[[projects]] - digest = "1:95e1006e41c641abd2f365dfa0f1213c04da294e7cd5f0bf983af234b775db64" - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "jsonpb", - "proto", - "protoc-gen-gogo/descriptor", - "sortkeys", - "types", - ] - pruneopts = "UT" - revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" - version = "v1.2.1" - -[[projects]] - digest = "1:239c4c7fd2159585454003d9be7207167970194216193a8a210b8d29576f19c9" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f" - version = "v1.3.0" - -[[projects]] - branch = "master" - digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "UT" - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e" - name = "github.com/gorilla/websocket" - packages = ["."] - pruneopts = "UT" - revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" - version = "v1.2.0" - -[[projects]] - digest = 
"1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token", - ] - pruneopts = "UT" - revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" - version = "v1.0.0" - -[[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - pruneopts = "UT" - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - digest = "1:a74b5a8e34ee5843cd6e65f698f3e75614f812ff170c2243425d75bc091e9af2" - name = "github.com/jmhodges/levigo" - packages = ["."] - pruneopts = "UT" - revision = "853d788c5c416eaaee5b044570784a96c7a26975" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72" - name = "github.com/kr/logfmt" - packages = ["."] - pruneopts = "UT" - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - digest = "1:b18a269f11ff51135d6f82987dbb53288f4d66098a6639b429f4f494a910155b" - name = "github.com/libp2p/go-buffer-pool" - packages = ["."] - pruneopts = "UT" - revision = "c4a5988a1e475884367015e1a2d0bd5fa4c491f4" - version = "v0.0.2" - -[[projects]] - digest = "1:53e8c5c79716437e601696140e8b1801aae4204f4ec54a504333702a49572c4f" - name = "github.com/magiconair/properties" - packages = [ - ".", - "assert", - ] - pruneopts = "UT" - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "UT" - revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" - version = "v1.1.2" - -[[projects]] - digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "UT" - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" - -[[projects]] - digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:26663fafdea73a38075b07e8e9d82fc0056379d2be8bb4e13899e8fda7c7dd23" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "abad2d1bd44235a26707c172eab6bca5bf2dbad3" - version = "v0.9.1" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = 
"1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6" - -[[projects]] - branch = "master" - digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" - -[[projects]] - digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - pruneopts = "UT" - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758" - name = "github.com/rs/cors" - packages = ["."] - pruneopts = "UT" - revision = "9a47f48565a795472d43519dd49aac781f3034fb" - version = "v1.6.0" - -[[projects]] - digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "UT" - revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" - version = "v1.1.2" - -[[projects]] - digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" - name = "github.com/spf13/cast" - packages = ["."] - pruneopts = "UT" - revision = "8c9545af88b134710ab1cd196795e7f2388358d7" - version = "v1.3.0" - -[[projects]] - digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e" - name = "github.com/spf13/cobra" - packages = ["."] - pruneopts = "UT" - revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" - version = "v0.0.1" - -[[projects]] - digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - pruneopts = "UT" - revision = "4a4406e478ca629068e7768fc33f3f044173c0a6" - version = "v1.0.0" - -[[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" - -[[projects]] - digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96" - name = "github.com/spf13/viper" - packages = ["."] - pruneopts = "UT" - revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" - version = "v1.0.0" - -[[projects]] - digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - ] - pruneopts = "UT" - revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" - version = "v1.2.1" - -[[projects]] - branch = "master" - digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] - pruneopts = "UT" - revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43" - -[[projects]] - digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a" - name = "github.com/tendermint/go-amino" - packages = ["."] - pruneopts = "UT" - revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885" - version = "v0.14.1" - 
-[[projects]] - branch = "master" - digest = "1:f4edb30d5ff238e2abba10457010f74cd55ae20bbda8c54db1a07155fa020490" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "chacha20poly1305", - "curve25519", - "ed25519", - "ed25519/internal/edwards25519", - "hkdf", - "internal/chacha20", - "internal/subtle", - "nacl/box", - "nacl/secretbox", - "openpgp/armor", - "openpgp/errors", - "poly1305", - "ripemd160", - "salsa20/salsa", - ] - pruneopts = "UT" - revision = "8dd112bcdc25174059e45e07517d9fc663123347" - -[[projects]] - digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "netutil", - "trace", - ] - pruneopts = "UT" - revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" - -[[projects]] - branch = "master" - digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca" - name = "golang.org/x/sys" - packages = [ - "cpu", - "unix", - ] - pruneopts = "UT" - revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4" - -[[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313" - -[[projects]] - digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/grpcrand", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport", - ] - pruneopts = "UT" - revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" - version = "v1.13.0" - -[[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/btcsuite/btcd/btcec", - "github.com/btcsuite/btcutil/base58", - "github.com/btcsuite/btcutil/bech32", - "github.com/fortytw2/leaktest", - "github.com/go-kit/kit/log", - "github.com/go-kit/kit/log/level", - "github.com/go-kit/kit/log/term", - "github.com/go-kit/kit/metrics", - "github.com/go-kit/kit/metrics/discard", - "github.com/go-kit/kit/metrics/prometheus", - "github.com/go-logfmt/logfmt", - "github.com/gogo/protobuf/gogoproto", - "github.com/gogo/protobuf/jsonpb", - "github.com/gogo/protobuf/proto", - "github.com/gogo/protobuf/types", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/ptypes/timestamp", - "github.com/golang/snappy", - 
"github.com/gorilla/websocket", - "github.com/jmhodges/levigo", - "github.com/libp2p/go-buffer-pool", - "github.com/magiconair/properties/assert", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/rcrowley/go-metrics", - "github.com/rs/cors", - "github.com/spf13/cobra", - "github.com/spf13/viper", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/require", - "github.com/syndtr/goleveldb/leveldb", - "github.com/syndtr/goleveldb/leveldb/errors", - "github.com/syndtr/goleveldb/leveldb/filter", - "github.com/syndtr/goleveldb/leveldb/iterator", - "github.com/syndtr/goleveldb/leveldb/opt", - "github.com/tendermint/go-amino", - "golang.org/x/crypto/bcrypt", - "golang.org/x/crypto/chacha20poly1305", - "golang.org/x/crypto/curve25519", - "golang.org/x/crypto/ed25519", - "golang.org/x/crypto/hkdf", - "golang.org/x/crypto/nacl/box", - "golang.org/x/crypto/nacl/secretbox", - "golang.org/x/crypto/openpgp/armor", - "golang.org/x/crypto/ripemd160", - "golang.org/x/net/context", - "golang.org/x/net/netutil", - "google.golang.org/grpc", - "google.golang.org/grpc/credentials", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index b51c82dae..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,97 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true -# -########################################################### - -# Allow only patch releases for serialization libraries -[[constraint]] - name = "github.com/tendermint/go-amino" - version = "~0.14.1" - -[[constraint]] - name = "github.com/gogo/protobuf" - version = "~1.2.1" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "~1.3.0" - -# Allow only minor releases for other libraries -[[constraint]] - name = "github.com/go-kit/kit" - version = "^0.6.0" - -[[constraint]] - name = "github.com/gorilla/websocket" - version = "^1.2.0" - -[[constraint]] - name = "github.com/rs/cors" - version = "^1.6.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "^0.8.0" - -[[constraint]] - name = "github.com/spf13/cobra" - version = "^0.0.1" - -[[constraint]] - name = "github.com/spf13/viper" - version = "^1.0.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "^1.2.1" - -[[constraint]] - name = "google.golang.org/grpc" - version = "^1.13.0" - -[[constraint]] - name = "github.com/fortytw2/leaktest" - version = "^1.2.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "^0.9.1" - -[[constraint]] - name = "github.com/jmhodges/levigo" - version = "^1.0.0" - -################################### -## Repos which don't have releases. 
- -## - github.com/btcsuite/btcd -## - golang.org/x/crypto -## - github.com/btcsuite/btcutil -## - github.com/rcrowley/go-metrics -## - golang.org/x/net - -[[constraint]] - name = "github.com/libp2p/go-buffer-pool" - version = "^0.0.1" - -[prune] - go-tests = true - unused-packages = true diff --git a/Makefile b/Makefile index cd4da1dd7..e5ec00112 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ GOTOOLS = \ github.com/mitchellh/gox \ - github.com/golang/dep/cmd/dep \ github.com/golangci/golangci-lint/cmd/golangci-lint \ github.com/gogo/protobuf/protoc-gen-gogo \ github.com/square/certstrap @@ -8,13 +7,16 @@ GOBIN?=${GOPATH}/bin PACKAGES=$(shell go list ./...) OUTPUT?=build/tendermint +export GO111MODULE = on + INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" +LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w +BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" all: check build test install -check: check_tools get_vendor_deps +check: check_tools ######################################## ### Build Tendermint @@ -23,16 +25,16 @@ build: CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ build_c: - CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/ + CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/ build_race: - CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint + CGO_ENABLED=1 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint + CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint install_c: - CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint + CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint ######################################## ### Protobuf @@ -56,10 +58,10 @@ protoc_abci: abci/types/types.pb.go protoc_proto3types: types/proto3/block.pb.go build_abci: - @go build -i ./abci/cmd/... + @go build -mod=readonly -i ./abci/cmd/... install_abci: - @go install ./abci/cmd/... + @go install -mod=readonly ./abci/cmd/... ######################################## ### Distribution @@ -85,11 +87,6 @@ update_tools: @echo "--> Updating tools" ./scripts/get_tools.sh -#Update dependencies -get_vendor_deps: - @echo "--> Running dep" - @dep ensure - #For ABCI and libs get_protoc: @# https://github.com/google/protobuf/releases @@ -119,24 +116,31 @@ get_deps_bin_size: protoc_libs: libs/common/types.pb.go +# generates certificates for TLS testing in remotedb and RPC server gen_certs: clean_certs - ## Generating certificates for TLS testing... 
certstrap init --common-name "tendermint.com" --passphrase "" - certstrap request-cert -ip "::" --passphrase "" - certstrap sign "::" --CA "tendermint.com" --passphrase "" - mv out/::.crt out/::.key db/remotedb + certstrap request-cert --common-name "remotedb" -ip "127.0.0.1" --passphrase "" + certstrap sign "remotedb" --CA "tendermint.com" --passphrase "" + mv out/remotedb.crt libs/db/remotedb/test.crt + mv out/remotedb.key libs/db/remotedb/test.key + certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase "" + certstrap sign "server" --CA "tendermint.com" --passphrase "" + mv out/server.crt rpc/lib/server/test.crt + mv out/server.key rpc/lib/server/test.key + rm -rf out +# deletes generated certificates clean_certs: - ## Cleaning TLS testing certificates... - rm -rf out - rm -f db/remotedb/::.crt db/remotedb/::.key + rm -f libs/db/remotedb/test.crt + rm -f libs/db/remotedb/test.key + rm -f rpc/lib/server/test.crt + rm -f rpc/lib/server/test.key -test_libs: gen_certs - go test -tags gcc $(PACKAGES) - make clean_certs +test_libs: + go test -tags "cleveldb boltdb" $(PACKAGES) grpc_dbserver: - protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto + protoc -I libs/db/remotedb/proto/ libs/db/remotedb/proto/defs.proto --go_out=plugins=grpc:libs/db/remotedb/proto protoc_grpc: rpc/grpc/types.pb.go @@ -192,7 +196,6 @@ test_p2p: test_integrations: make build_docker_test_image make get_tools - make get_vendor_deps make install make test_cover make test_apps @@ -254,10 +257,6 @@ rpc-docs: cat rpc/core/docs_header.md > $(DESTINATION) godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION) -check_dep: - dep status >> /dev/null - !(grep -n branch Gopkg.toml) - ########################################################### ### Docker image @@ -270,15 +269,15 @@ build-docker: ### Local testnet using docker # Build linux binary on other platforms -build-linux: get_tools get_vendor_deps +build-linux: get_tools GOOS=linux GOARCH=amd64 $(MAKE) build build-docker-localnode: @cd networks/local && make # Run a 4-node testnet locally -localnet-start: localnet-stop - @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi +localnet-start: localnet-stop build-docker-localnode + @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --config /etc/tendermint/config-template.toml --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2; fi docker-compose up # Stop testnet @@ -309,7 +308,23 @@ sentry-stop: build-slate: bash scripts/slate.sh +# Build hooks for dredd, to skip or add information on some steps +build-contract-tests-hooks: +ifeq ($(OS),Windows_NT) + go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests +else + go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests ./cmd/contract_tests +endif + +# Run a nodejs tool to test endpoints against a localnet +# The command takes care of starting and stopping the network +# prerequisites: build-contract-tests-hooks build-linux +# the two build targets are not listed as prerequisites so that this command can run from generic containers or machines.
+# The binaries should be built beforehand +contract-tests: + dredd + # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint +.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint build-contract-tests-hooks contract-tests diff --git a/README.md b/README.md index ad4fc1308..3ea9d5de4 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,21 @@ # Tendermint +![banner](docs/tendermint-core-image.jpg) + [Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) [State Machines](https://en.wikipedia.org/wiki/State_machine_replication). -Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)), for short. +Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) -[![API Reference]( -https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://godoc.org/github.com/tendermint/tendermint) +[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint) [![Go version](https://img.shields.io/badge/go-1.12.0-blue.svg)](https://github.com/moovweb/gvm) [![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org) [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) [![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) - -Branch | Tests | Coverage -----------|-------|---------- -master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) -develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) +| Branch | Tests | Coverage | +| ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. @@ -27,13 +25,15 @@ For protocol details, see [the specification](/docs/spec). For detailed analysis of the consensus protocol, including safety and liveness proofs, see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)". -## A Note on Production Readiness +## Releases -While Tendermint is being used in production in private, permissioned -environments, we are still working actively to harden and audit it in preparation -for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/). -We are also still making breaking changes to the protocol and the APIs. -Thus, we tag the releases as *alpha software*. +NOTE: The master branch is now an active development branch (starting with `v0.32`). Please, do not depend on it and +use [releases](https://github.com/tendermint/tendermint/releases) instead. 
+ +Tendermint is being used in production in both private and public environments, +most notably the blockchains of the [Cosmos Network](https://cosmos.network/). +However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0. +See below for more details about [versioning](#versioning). In any case, if you intend to run Tendermint in production, please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org). @@ -47,9 +47,9 @@ For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY. ## Minimum requirements -Requirement|Notes ----|--- -Go version | Go1.11.4 or higher +| Requirement | Notes | +| ----------- | ------------------ | +| Go version | Go1.11.4 or higher | ## Documentation @@ -143,20 +143,20 @@ Additional documentation is found [here](/docs/tools). ### Sub-projects -* [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with +- [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with interfaces -* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation +- [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation +- [Tm-cmn](http://github.com/tendermint/tm-cmn), Commonly used libs across Tendermint & Cosmos repos ### Applications -* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework -* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint -* [Many more](https://tendermint.com/ecosystem) +- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework +- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint +- [Many more](https://tendermint.com/ecosystem) ### Research -* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938) -* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) -* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf) -* [Blog](https://blog.cosmos.network/tendermint/home) - +- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938) +- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) +- [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf) +- [Blog](https://blog.cosmos.network/tendermint/home) diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 60c284333..000000000 --- a/ROADMAP.md +++ /dev/null @@ -1,23 +0,0 @@ -# Roadmap - -BREAKING CHANGES: -- Better support for injecting randomness -- Upgrade consensus for more real-time use of evidence - -FEATURES: -- Use the chain as its own CA for nodes and validators -- Tooling to run multiple blockchains/apps, possibly in a single process -- State syncing (without transaction replay) -- Add authentication and rate-limitting to the RPC - -IMPROVEMENTS: -- Improve subtleties around mempool caching and logic -- Consensus optimizations: - - cache block parts for faster agreement after round changes - - propagate block parts rarest first -- Better testing of the consensus state machine (ie. 
use a DSL) -Auto compiled serialization/deserialization code instead of go-wire reflection - -BUG FIXES: -Graceful handling/recovery for apps that have non-determinism or fail to halt -Graceful handling/recovery for violations of safety, or liveness diff --git a/UPGRADING.md b/UPGRADING.md index eccb954d3..af42d2a66 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -3,6 +3,124 @@ This guide provides steps to be followed when you upgrade your applications to a newer version of Tendermint Core. +## v0.32.0 + +This release is compatible with previous blockchains; +however, the new ABCI Events mechanism may create some complexity +for nodes wishing to continue operation with v0.32 from a previous version. +There are some minor breaking changes to the RPC. + +### Config Changes + +If you have `db_backend` set to `leveldb` in your config file, please change it +to `goleveldb` or `cleveldb`. + +### RPC Changes + +The default listen address for the RPC is now `127.0.0.1`. If you want to expose +it publicly, you have to explicitly configure it. Note that exposing the RPC to the +public internet may not be safe - endpoints which return a lot of data may +enable resource exhaustion attacks on your node, causing the process to crash. + +Any consumers of `/block_results` need to be mindful of the change in all field +names from CamelCase to snake_case, eg. `results.DeliverTx` is now `results.deliver_tx`. +This is a fix, but it's breaking. + +### ABCI Changes + +ABCI responses which previously had a `Tags` field now have an `Events` field +instead. The original `Tags` field was simply a list of key-value pairs, where +each key effectively represented some attribute of an event occurring in the +blockchain, like `sender`, `receiver`, or `amount`. However, it was difficult to +represent the occurrence of multiple events (for instance, multiple transfers) in a single list. +The new `Events` field contains a list of `Event`, where each `Event` is itself a list +of key-value pairs, allowing for more natural expression of multiple events in +eg. a single DeliverTx or EndBlock. Note that each `Event` also includes a `Type`, which is meant to categorize the +event. + +For transaction indexing, the index key is +prefixed with the event type: `{eventType}.{attributeKey}`. +If the same event type and attribute key appear multiple times, the values are +appended in a list. + +To make queries, include the event type as a prefix. For instance, if you +previously queried for `recipient = 'XYZ'`, and after the upgrade you name your event `transfer`, +the new query would be for `transfer.recipient = 'XYZ'`. + +Note that transactions indexed on a node before upgrading to v0.32 will still be indexed +using the old scheme. For instance, if a node upgraded at height 100, +transactions before 100 would be queried with `recipient = 'XYZ'` and +transactions after 100 would be queried with `transfer.recipient = 'XYZ'`. +While this presents additional complexity to clients, it avoids the need to +reindex. Of course, you can reset the node and sync from scratch to re-index +entirely using the new scheme. + +We illustrate further with a more complete example.
+ +Prior to the update, suppose your `ResponseDeliverTx` looked like: + +```go +abci.ResponseDeliverTx{ + Tags: []cmn.KVPair{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, + }, +} +``` + +The following queries would match this transaction: + +```go +query.MustParse("tm.event = 'Tx' AND sender = 'foo'") +query.MustParse("tm.event = 'Tx' AND recipient = 'bar'") +query.MustParse("tm.event = 'Tx' AND sender = 'foo' AND recipient = 'bar'") +``` + +Following the upgrade, your `ResponseDeliverTx` would instead contain +the following `Events`: + +```go +abci.ResponseDeliverTx{ + Events: []abci.Event{ + { + Type: "transfer", + Attributes: cmn.KVPairs{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, + }, + }, + }, +} +``` + +Now the following queries would match this transaction: + +```go +query.MustParse("tm.event = 'Tx' AND transfer.sender = 'foo'") +query.MustParse("tm.event = 'Tx' AND transfer.recipient = 'bar'") +query.MustParse("tm.event = 'Tx' AND transfer.sender = 'foo' AND transfer.recipient = 'bar'") +``` + +For further documentation on `Events`, see the [docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events). + +### Go Applications + +The ABCI Application interface changed slightly so the CheckTx and DeliverTx +methods now take Request structs. The contents of these structs are just the raw +tx bytes, which were previously passed in as the argument. + + +## v0.31.6 + +There are no breaking changes in this release except the Go API of the p2p and +mempool packages. However, if you're using cleveldb, you'll need to change +the compilation tag: + +Use the `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or +use `make build_c` / `make install_c` (full instructions can be found at +https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support) + ## v0.31.0 This release contains a breaking change to the behaviour of the pubsub system.
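To make the v0.32 "Go Applications" change above concrete, here is a minimal sketch of what a migrated application looks like. `MyApp` and the `myapp` package are hypothetical names used only for illustration; the `types.RequestCheckTx` / `types.RequestDeliverTx` signatures, the `req.Tx` field, `types.BaseApplication`, and the `code` constants are taken from the `abci/types` and `abci/example` changes later in this diff.

```go
package myapp

import (
	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

// MyApp is a hypothetical application. It embeds BaseApplication so all
// other ABCI methods keep their default implementations.
type MyApp struct {
	types.BaseApplication
}

// Before v0.32 this was CheckTx(tx []byte); the raw bytes now arrive
// wrapped in a Request struct, as req.Tx.
func (app *MyApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if len(req.Tx) == 0 {
		return types.ResponseCheckTx{Code: code.CodeTypeEncodingError, Log: "empty tx"}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK}
}

// Likewise, DeliverTx(tx []byte) becomes DeliverTx(types.RequestDeliverTx);
// process req.Tx exactly as the old tx argument was processed.
func (app *MyApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	_ = req.Tx // application-specific state transition goes here
	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
```

Client-side callers migrate the same way, eg. `client.CheckTxSync(txBytes)` becomes `client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})`, as the `abci/tests` changes below show.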
diff --git a/Vagrantfile b/Vagrantfile index da4f8ac3d..67de74297 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -57,6 +57,6 @@ Vagrant.configure("2") do |config| # get all deps and tools, ready to install/test su - vagrant -c 'source /home/vagrant/.bash_profile' - su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps' + su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools' SHELL end diff --git a/abci/client/client.go b/abci/client/client.go index b173fff6c..af8344db2 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -28,9 +28,9 @@ type Client interface { EchoAsync(msg string) *ReqRes InfoAsync(types.RequestInfo) *ReqRes SetOptionAsync(types.RequestSetOption) *ReqRes - DeliverTxAsync(tx []byte) *ReqRes - CheckTxAsync(tx []byte) *ReqRes - ReCheckTxAsync(tx []byte) *ReqRes + ReCheckTxAsync(types.RequestCheckTx) *ReqRes + DeliverTxAsync(types.RequestDeliverTx) *ReqRes + CheckTxAsync(types.RequestCheckTx) *ReqRes QueryAsync(types.RequestQuery) *ReqRes CommitAsync() *ReqRes InitChainAsync(types.RequestInitChain) *ReqRes @@ -41,8 +41,8 @@ type Client interface { EchoSync(msg string) (*types.ResponseEcho, error) InfoSync(types.RequestInfo) (*types.ResponseInfo, error) SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error) - DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) - CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) + DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error) + CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error) QuerySync(types.RequestQuery) (*types.ResponseQuery, error) CommitSync() (*types.ResponseCommit, error) InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 86dcec242..337ad7778 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -6,8 +6,8 @@ import ( "sync" "time" - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + "golang.org/x/net/context" + "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" @@ -39,7 +39,7 @@ func NewGRPCClient(addr string, mustConnect bool) *grpcClient { return cli } -func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) { +func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return cmn.Connect(addr) } @@ -49,7 +49,7 @@ func (cli *grpcClient) OnStart() error { } RETRY_LOOP: for { - conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc)) + conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) if err != nil { if cli.mustConnect { return err @@ -65,7 +65,8 @@ RETRY_LOOP: ENSURE_CONNECTED: for { - _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true)) + // FailFast is deprecated + _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true)) if err == nil { break ENSURE_CONNECTED } @@ -125,7 +126,7 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) { func (cli *grpcClient) EchoAsync(msg string) *ReqRes { req := types.ToRequestEcho(msg) - res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true)) + res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -134,7 +135,7 @@ func 
(cli *grpcClient) EchoAsync(msg string) *ReqRes { func (cli *grpcClient) FlushAsync() *ReqRes { req := types.ToRequestFlush() - res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true)) + res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -143,7 +144,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes { func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes { req := types.ToRequestInfo(params) - res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true)) + res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -152,34 +153,34 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes { func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes { req := types.ToRequestSetOption(params) - res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true)) + res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}}) } -func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes { - req := types.ToRequestDeliverTx(tx) - res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true)) +func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes { + req := types.ToRequestDeliverTx(params) + res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}}) } -func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes { - req := types.ToRequestCheckTx(tx) - res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true)) +func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes { + req := types.ToRequestCheckTx(params) + res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}) } -func (cli *grpcClient) ReCheckTxAsync(tx []byte) *ReqRes { +func (cli *grpcClient) ReCheckTxAsync(tx types.RequestCheckTx) *ReqRes { req := types.ToRequestCheckTx(tx) - res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true)) + res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -188,7 +189,7 @@ func (cli *grpcClient) ReCheckTxAsync(tx []byte) *ReqRes { func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes { req := types.ToRequestQuery(params) - res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true)) + res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -197,7 +198,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes { func (cli *grpcClient) CommitAsync() *ReqRes { req := types.ToRequestCommit() - res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true)) + res, err := cli.client.Commit(context.Background(), req.GetCommit(), 
grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -206,7 +207,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes { func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes { req := types.ToRequestInitChain(params) - res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true)) + res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -215,7 +216,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes { func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes { req := types.ToRequestBeginBlock(params) - res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true)) + res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -224,7 +225,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes { func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { req := types.ToRequestEndBlock(params) - res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true)) + res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true)) if err != nil { cli.StopForError(err) } @@ -237,18 +238,22 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) reqres.Done() // Release waiters reqres.SetDone() // so reqRes.SetCallback will run the callback - // go routine for callbacks + // goroutine for callbacks go func() { - // Notify reqRes listener if set - if cb := reqres.GetCallback(); cb != nil { - cb(res) - } + cli.mtx.Lock() + defer cli.mtx.Unlock() // Notify client listener if set if cli.resCb != nil { cli.resCb(reqres.Request, res) } + + // Notify reqRes listener if set + if cb := reqres.GetCallback(); cb != nil { + cb(res) + } }() + return reqres } @@ -274,13 +279,13 @@ func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.Respons return reqres.Response.GetSetOption(), cli.Error() } -func (cli *grpcClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) { - reqres := cli.DeliverTxAsync(tx) +func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { + reqres := cli.DeliverTxAsync(params) return reqres.Response.GetDeliverTx(), cli.Error() } -func (cli *grpcClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) { - reqres := cli.CheckTxAsync(tx) +func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) { + reqres := cli.CheckTxAsync(params) return reqres.Response.GetCheckTx(), cli.Error() } diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 19998f9c0..30cf57ae0 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -81,34 +81,34 @@ func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes { ) } -func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes { +func (app *localClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes { app.mtx.Lock() defer app.mtx.Unlock() - res := app.Application.DeliverTx(tx) + res := app.Application.DeliverTx(req) return app.callback( - types.ToRequestDeliverTx(tx), + types.ToRequestDeliverTx(req), types.ToResponseDeliverTx(res), ) } -func (app *localClient) CheckTxAsync(tx []byte) *ReqRes { +func (app *localClient) CheckTxAsync(req 
types.RequestCheckTx) *ReqRes { app.mtx.Lock() defer app.mtx.Unlock() - res := app.Application.CheckTx(tx) + res := app.Application.CheckTx(req) return app.callback( - types.ToRequestCheckTx(tx), + types.ToRequestCheckTx(req), types.ToResponseCheckTx(res), ) } -func (app *localClient) ReCheckTxAsync(tx []byte) *ReqRes { +func (app *localClient) ReCheckTxAsync(req types.RequestCheckTx) *ReqRes { app.mtx.Lock() defer app.mtx.Unlock() - res := app.Application.ReCheckTx(tx) + res := app.Application.ReCheckTx(req) return app.callback( - types.ToRequestCheckTx(tx), + types.ToRequestCheckTx(req), types.ToResponseCheckTx(res), ) } @@ -208,19 +208,19 @@ func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.Respon return &res, nil } -func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) { +func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { app.mtx.Lock() defer app.mtx.Unlock() - res := app.Application.DeliverTx(tx) + res := app.Application.DeliverTx(req) return &res, nil } -func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) { +func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { app.mtx.Lock() defer app.mtx.Unlock() - res := app.Application.CheckTx(tx) + res := app.Application.CheckTx(req) return &res, nil } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 58452fc5d..483ce7e07 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -55,10 +55,6 @@ func NewSocketClient(addr string, mustConnect bool) *socketClient { } func (cli *socketClient) OnStart() error { - if err := cli.BaseService.OnStart(); err != nil { - return err - } - var err error var conn net.Conn RETRY_LOOP: @@ -82,15 +78,12 @@ RETRY_LOOP: } func (cli *socketClient) OnStop() { - cli.BaseService.OnStop() - - cli.mtx.Lock() - defer cli.mtx.Unlock() if cli.conn != nil { - // does this really need a mutex? cli.conn.Close() } + cli.mtx.Lock() + defer cli.mtx.Unlock() cli.flushQueue() } @@ -209,19 +202,18 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { reqres.Done() // Release waiters cli.reqSent.Remove(next) // Pop first item from linked list + // Notify client listener if set (global callback). + if cli.resCb != nil { + cli.resCb(reqres.Request, res) + } + // Notify reqRes listener if set (request specific callback). // NOTE: it is possible this callback isn't set on the reqres object. // at this point, in which case it will be called after, when it is set. - // TODO: should we move this after the resCb call so the order is always consistent? if cb := reqres.GetCallback(); cb != nil { cb(res) } - // Notify client listener if set (global callback). 
- if cli.resCb != nil { - cli.resCb(reqres.Request, res) - } - return nil } @@ -243,15 +235,15 @@ func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes { return cli.queueRequest(types.ToRequestSetOption(req)) } -func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes { - return cli.queueRequest(types.ToRequestDeliverTx(tx)) +func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes { + return cli.queueRequest(types.ToRequestDeliverTx(req)) } -func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes { - return cli.queueRequest(types.ToRequestCheckTx(tx)) +func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes { + return cli.queueRequest(types.ToRequestCheckTx(req)) } -func (cli *socketClient) ReCheckTxAsync(tx []byte) *ReqRes { +func (cli *socketClient) ReCheckTxAsync(tx types.RequestCheckTx) *ReqRes { return cli.queueRequest(types.ToRequestCheckTx(tx)) } @@ -304,14 +296,14 @@ func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.Respo return reqres.Response.GetSetOption(), cli.Error() } -func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) { - reqres := cli.queueRequest(types.ToRequestDeliverTx(tx)) +func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { + reqres := cli.queueRequest(types.ToRequestDeliverTx(req)) cli.FlushSync() return reqres.Response.GetDeliverTx(), cli.Error() } -func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) { - reqres := cli.queueRequest(types.ToRequestCheckTx(tx)) +func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { + reqres := cli.queueRequest(types.ToRequestCheckTx(req)) cli.FlushSync() return reqres.Response.GetCheckTx(), cli.Error() } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 7e55569cb..31721b21b 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -174,9 +174,7 @@ where example.file looks something like: info `, Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdBatch(cmd, args) - }, + RunE: cmdBatch, } var consoleCmd = &cobra.Command{ @@ -189,9 +187,7 @@ without opening a new connection each time `, Args: cobra.ExactArgs(0), ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"}, - RunE: func(cmd *cobra.Command, args []string) error { - return cmdConsole(cmd, args) - }, + RunE: cmdConsole, } var echoCmd = &cobra.Command{ @@ -199,27 +195,21 @@ var echoCmd = &cobra.Command{ Short: "have the application echo a message", Long: "have the application echo a message", Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdEcho(cmd, args) - }, + RunE: cmdEcho, } var infoCmd = &cobra.Command{ Use: "info", Short: "get some info about the application", Long: "get some info about the application", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdInfo(cmd, args) - }, + RunE: cmdInfo, } var setOptionCmd = &cobra.Command{ Use: "set_option", Short: "set an option on the application", Long: "set an option on the application", Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdSetOption(cmd, args) - }, + RunE: cmdSetOption, } var deliverTxCmd = &cobra.Command{ @@ -227,9 +217,7 @@ var deliverTxCmd = &cobra.Command{ Short: "deliver a new transaction to the application", Long: 
"deliver a new transaction to the application", Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdDeliverTx(cmd, args) - }, + RunE: cmdDeliverTx, } var checkTxCmd = &cobra.Command{ @@ -237,9 +225,7 @@ var checkTxCmd = &cobra.Command{ Short: "validate a transaction", Long: "validate a transaction", Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdCheckTx(cmd, args) - }, + RunE: cmdCheckTx, } var commitCmd = &cobra.Command{ @@ -247,9 +233,7 @@ var commitCmd = &cobra.Command{ Short: "commit the application state and return the Merkle root hash", Long: "commit the application state and return the Merkle root hash", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdCommit(cmd, args) - }, + RunE: cmdCommit, } var versionCmd = &cobra.Command{ @@ -268,9 +252,7 @@ var queryCmd = &cobra.Command{ Short: "query the application state", Long: "query the application state", Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdQuery(cmd, args) - }, + RunE: cmdQuery, } var counterCmd = &cobra.Command{ @@ -278,9 +260,7 @@ var counterCmd = &cobra.Command{ Short: "ABCI demo example", Long: "ABCI demo example", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdCounter(cmd, args) - }, + RunE: cmdCounter, } var kvstoreCmd = &cobra.Command{ @@ -288,9 +268,7 @@ var kvstoreCmd = &cobra.Command{ Short: "ABCI demo example", Long: "ABCI demo example", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdKVStore(cmd, args) - }, + RunE: cmdKVStore, } var testCmd = &cobra.Command{ @@ -298,9 +276,7 @@ var testCmd = &cobra.Command{ Short: "run integration tests", Long: "run integration tests", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - return cmdTest(cmd, args) - }, + RunE: cmdTest, } // Generates new Args array based off of previous call args to maintain flag persistence @@ -356,16 +332,18 @@ func cmdTest(cmd *cobra.Command, args []string) error { func cmdBatch(cmd *cobra.Command, args []string) error { bufReader := bufio.NewReader(os.Stdin) +LOOP: for { line, more, err := bufReader.ReadLine() - if more { + switch { + case more: return errors.New("Input line is too long") - } else if err == io.EOF { - break - } else if len(line) == 0 { + case err == io.EOF: + break LOOP + case len(line) == 0: continue - } else if err != nil { + case err != nil: return err } @@ -419,7 +397,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { } // otherwise, we need to skip the next one too - i += 1 + i++ continue } @@ -546,7 +524,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTxSync(txBytes) + res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes}) if err != nil { return err } @@ -572,7 +550,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(txBytes) + res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes}) if err != nil { return err } diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 8e43a887a..e334c636e 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -42,15 +42,15 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo return types.ResponseSetOption{} } 
-func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { +func (app *CounterApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { if app.serial { - if len(tx) > 8 { + if len(req.Tx) > 8 { return types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))} + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))} } tx8 := make([]byte, 8) - copy(tx8[len(tx8)-len(tx):], tx) + copy(tx8[len(tx8)-len(req.Tx):], req.Tx) txValue := binary.BigEndian.Uint64(tx8) if txValue != uint64(app.txCount) { return types.ResponseDeliverTx{ @@ -62,15 +62,15 @@ func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { return types.ResponseDeliverTx{Code: code.CodeTypeOK} } -func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx { +func (app *CounterApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { if app.serial { - if len(tx) > 8 { + if len(req.Tx) > 8 { return types.ResponseCheckTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))} + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))} } tx8 := make([]byte, 8) - copy(tx8[len(tx8)-len(tx):], tx) + copy(tx8[len(tx8)-len(req.Tx):], req.Tx) txValue := binary.BigEndian.Uint64(tx8) if txValue < uint64(app.txCount) { return types.ResponseCheckTx{ @@ -81,8 +81,8 @@ func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx { return types.ResponseCheckTx{Code: code.CodeTypeOK} } -func (app *CounterApplication) ReCheckTx(tx []byte) types.ResponseCheckTx { - return app.CheckTx(tx) +func (app *CounterApplication) ReCheckTx(req types.RequestCheckTx) types.ResponseCheckTx { + return app.CheckTx(req) } func (app *CounterApplication) Commit() (resp types.ResponseCommit) { diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 677a2a481..74510700b 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -87,7 +87,7 @@ func testStream(t *testing.T, app types.Application) { // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request - reqRes := client.DeliverTxAsync([]byte("test")) + reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")}) _ = reqRes // check err ? @@ -107,7 +107,7 @@ func testStream(t *testing.T, app types.Application) { //------------------------- // test grpc -func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) { +func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return cmn.Connect(addr) } @@ -123,7 +123,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) { defer server.Stop() // Connect to the socket - conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc)) + conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) if err != nil { t.Fatalf("Error dialing GRPC server: %v", err.Error()) } diff --git a/abci/example/kvstore/README.md b/abci/example/kvstore/README.md index e988eadb0..bed81a598 100644 --- a/abci/example/kvstore/README.md +++ b/abci/example/kvstore/README.md @@ -22,10 +22,10 @@ and the Handshake allows any necessary blocks to be replayed. 
Validator set changes are effected using the following transaction format: ``` -val:pubkey1/power1,addr2/power2,addr3/power3" +"val:pubkey1!power1,pubkey2!power2,pubkey3!power3" ``` -where `power1` is the new voting power for the validator with `pubkey1` (possibly a new one). +where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). +To remove a validator from the validator set, set power to `0`. There is no sybil protection against new validators joining. -Validators can be removed by setting their power to `0`. diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 955baefb4..7e408a534 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -76,25 +76,32 @@ func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.Respon } // tx is either "key=value" or just arbitrary bytes -func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { +func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { var key, value []byte - parts := bytes.Split(tx, []byte("=")) + parts := bytes.Split(req.Tx, []byte("=")) if len(parts) == 2 { key, value = parts[0], parts[1] } else { - key, value = tx, tx + key, value = req.Tx, req.Tx } + app.state.db.Set(prefixKey(key), value) app.state.Size += 1 - tags := []cmn.KVPair{ - {Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")}, - {Key: []byte("app.key"), Value: key}, + events := []types.Event{ + { + Type: "app", + Attributes: []cmn.KVPair{ + {Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")}, + {Key: []byte("key"), Value: key}, + }, + }, } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} + + return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} } -func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx { +func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1} } @@ -108,6 +115,7 @@ func (app *KVStoreApplication) Commit() types.ResponseCommit { return types.ResponseCommit{Data: appHash} } +// Returns an associated value or nil if missing. 
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { if reqQuery.Prove { value := app.state.db.Get(prefixKey(reqQuery.Data)) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a18fb8d3c..80b07ff5a 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -19,10 +19,11 @@ import ( ) func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { - ar := app.DeliverTx(tx) + req := types.RequestDeliverTx{Tx: tx} + ar := app.DeliverTx(req) require.False(t, ar.IsErr(), ar) // repeating tx doesn't raise error - ar = app.DeliverTx(tx) + ar = app.DeliverTx(req) require.False(t, ar.IsErr(), ar) // make sure query is fine @@ -147,7 +148,7 @@ func TestValUpdates(t *testing.T) { makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3) - vals1 = append(vals[:nInit-2], vals[nInit+1]) + vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic vals2 = kvstore.Validators() valsEqual(t, vals1, vals2) @@ -179,7 +180,7 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) for _, tx := range txs { - if r := kvstore.DeliverTx(tx); r.IsErr() { + if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() { t.Fatal(r) } } @@ -282,11 +283,11 @@ func runClientTests(t *testing.T, client abcicli.Client) { } func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) { - ar, err := app.DeliverTxSync(tx) + ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // repeating tx doesn't raise error - ar, err = app.DeliverTxSync(tx) + ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index cdadbc5c0..59b8c8a0d 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -2,7 +2,7 @@ package kvstore import ( "bytes" - "encoding/hex" + "encoding/base64" "fmt" "strconv" "strings" @@ -10,7 +10,9 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" + tmtypes "github.com/tendermint/tendermint/types" ) const ( @@ -27,6 +29,8 @@ type PersistentKVStoreApplication struct { // validator set ValUpdates []types.ValidatorUpdate + valAddrToPubKeyMap map[string]types.PubKey + logger log.Logger } @@ -40,8 +44,9 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication state := loadState(db) return &PersistentKVStoreApplication{ - app: &KVStoreApplication{state: state}, - logger: log.NewNopLogger(), + app: &KVStoreApplication{state: state}, + valAddrToPubKeyMap: make(map[string]types.PubKey), + logger: log.NewNopLogger(), } } @@ -60,26 +65,26 @@ func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) t return app.app.SetOption(req) } -// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes -func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { +// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes +func (app *PersistentKVStoreApplication) DeliverTx(req 
types.RequestDeliverTx) types.ResponseDeliverTx { // if it starts with "val:", update the validator set - // format is "val:pubkey/power" - if isValidatorTx(tx) { + // format is "val:pubkey!power" + if isValidatorTx(req.Tx) { // update validators in the merkle tree // and in app.ValUpdates - return app.execValidatorTx(tx) + return app.execValidatorTx(req.Tx) } // otherwise, update the key-value store - return app.app.DeliverTx(tx) + return app.app.DeliverTx(req) } -func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx { - return app.app.CheckTx(tx) +func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { + return app.app.CheckTx(req) } -func (app *PersistentKVStoreApplication) ReCheckTx(tx []byte) types.ResponseCheckTx { - return app.app.CheckTx(tx) +func (app *PersistentKVStoreApplication) ReCheckTx(req types.RequestCheckTx) types.ResponseCheckTx { + return app.app.CheckTx(req) } // Commit will panic if InitChain was not called @@ -87,8 +92,20 @@ func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit { return app.app.Commit() } -func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery { - return app.app.Query(reqQuery) +// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded. +// For any other path, returns an associated value or nil if missing. +func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { + switch reqQuery.Path { + case "/val": + key := []byte("val:" + string(reqQuery.Data)) + value := app.app.state.db.Get(key) + + resQuery.Key = reqQuery.Data + resQuery.Value = value + return + default: + return app.app.Query(reqQuery) + } } // Save the validators in the merkle tree @@ -106,6 +123,19 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { // reset valset changes app.ValUpdates = make([]types.ValidatorUpdate, 0) + + for _, ev := range req.ByzantineValidators { + if ev.Type == tmtypes.ABCIEvidenceTypeDuplicateVote { + // decrease voting power by 1 + if ev.TotalVotingPower == 0 { + continue + } + app.updateValidator(types.ValidatorUpdate{ + PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)], + Power: ev.TotalVotingPower - 1, + }) + } + } return types.ResponseBeginBlock{} } @@ -141,33 +171,34 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida } func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte { - return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power)) + pubStr := base64.StdEncoding.EncodeToString(pubkey.Data) + return []byte(fmt.Sprintf("val:%s!%d", pubStr, power)) } func isValidatorTx(tx []byte) bool { return strings.HasPrefix(string(tx), ValidatorSetChangePrefix) } -// format is "val:pubkey/power" -// pubkey is raw 32-byte ed25519 key +// format is "val:pubkey!power" +// pubkey is a base64-encoded 32-byte ed25519 key func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx { tx = tx[len(ValidatorSetChangePrefix):] //get the pubkey and power - pubKeyAndPower := strings.Split(string(tx), "/") + pubKeyAndPower := strings.Split(string(tx), "!") if len(pubKeyAndPower) != 2 { return types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Expected 'pubkey/power'. 
Got %v", pubKeyAndPower)} + Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)} } pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1] // decode the pubkey - pubkey, err := hex.DecodeString(pubkeyS) + pubkey, err := base64.StdEncoding.DecodeString(pubkeyS) if err != nil { return types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)} + Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)} } // decode the power @@ -185,14 +216,20 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // add, update, or remove a validator func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { key := []byte("val:" + string(v.PubKey.Data)) + + pubkey := ed25519.PubKeyEd25519{} + copy(pubkey[:], v.PubKey.Data) + if v.Power == 0 { // remove validator if !app.app.state.db.Has(key) { + pubStr := base64.StdEncoding.EncodeToString(v.PubKey.Data) return types.ResponseDeliverTx{ Code: code.CodeTypeUnauthorized, - Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)} + Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)} } app.app.state.db.Delete(key) + delete(app.valAddrToPubKeyMap, string(pubkey.Address())) } else { // add or update validator value := bytes.NewBuffer(make([]byte, 0)) @@ -202,6 +239,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate Log: fmt.Sprintf("Error encoding validator: %v", err)} } app.app.state.db.Set(key, value.Bytes()) + app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey } // we only update the changes array if we successfully updated the tree diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 4b92f04cf..3e1d775d7 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -127,11 +127,12 @@ func (s *SocketServer) acceptConnectionsRoutine() { func (s *SocketServer) waitForClose(closeConn chan error, connID int) { err := <-closeConn - if err == io.EOF { + switch { + case err == io.EOF: s.Logger.Error("Connection was closed by client") - } else if err != nil { + case err != nil: s.Logger.Error("Connection error", "error", err) - } else { + default: // never happens s.Logger.Error("Connection was closed.") } @@ -146,6 +147,16 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) { func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) { var count int var bufReader = bufio.NewReader(conn) + + defer func() { + // make sure to recover from any app-related panics to allow proper socket cleanup + r := recover() + if r != nil { + closeConn <- fmt.Errorf("recovered from panic: %v", r) + s.appMtx.Unlock() + } + }() + for { var req = &types.Request{} @@ -154,7 +165,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, respo if err == io.EOF { closeConn <- err } else { - closeConn <- fmt.Errorf("Error reading message: %v", err.Error()) + closeConn <- fmt.Errorf("error reading message: %v", err) } return } @@ -178,10 +189,10 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types res := s.app.SetOption(*r.SetOption) responses <- types.ToResponseSetOption(res) case *types.Request_DeliverTx: - res := s.app.DeliverTx(r.DeliverTx.Tx) + res := s.app.DeliverTx(*r.DeliverTx) responses <- types.ToResponseDeliverTx(res) case *types.Request_CheckTx: - res := s.app.CheckTx(r.CheckTx.Tx) + res := 
s.app.CheckTx(*r.CheckTx) responses <- types.ToResponseCheckTx(res) case *types.Request_Commit: res := s.app.Commit() diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 5daa1e6af..58a413a4e 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -58,7 +58,7 @@ func Commit(client abcicli.Client, hashExp []byte) error { } func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.DeliverTxSync(txBytes) + res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: DeliverTx") @@ -77,7 +77,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp [] } func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.CheckTxSync(txBytes) + res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: CheckTx") diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go index 25ed2f582..9c32fcc7d 100644 --- a/abci/tests/test_app/app.go +++ b/abci/tests/test_app/app.go @@ -43,7 +43,7 @@ func commit(client abcicli.Client, hashExp []byte) { } func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) { - res, err := client.DeliverTxSync(txBytes) + res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes}) if err != nil { panicf("client error: %v", err) } diff --git a/abci/tests/test_app/test.sh b/abci/tests/test_app/test.sh index 230c94163..c0bdace27 100755 --- a/abci/tests/test_app/test.sh +++ b/abci/tests/test_app/test.sh @@ -3,6 +3,8 @@ set -e # These tests spawn the counter app and server by exec'ing the ABCI_APP command and run some simple client tests against it +export GO111MODULE=on + # Get the directory where this script is located.
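# (The readlink loop just below resolves symlinks so the script can cd into
# its real directory before running the tests. Editor's note, not part of
# the original script: with GO111MODULE=on set above, the
# `go run -mod=readonly` invocations further down fail fast if go.mod or
# go.sum would need changes, instead of fetching dependencies implicitly.)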
SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done @@ -13,13 +15,13 @@ cd "$DIR" echo "RUN COUNTER OVER SOCKET" # test golang counter -ABCI_APP="counter" go run ./*.go +ABCI_APP="counter" go run -mod=readonly ./*.go echo "----------------------" echo "RUN COUNTER OVER GRPC" # test golang counter via grpc -ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go +ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go echo "----------------------" # test nodejs counter diff --git a/abci/types/application.go b/abci/types/application.go index eb055e7cc..28cedb06e 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -15,13 +15,13 @@ type Application interface { Query(RequestQuery) ResponseQuery // Query for state // Mempool Connection - CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool - ReCheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool + ReCheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool + CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool // Consensus Connection InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block - DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing + DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set Commit() ResponseCommit // Commit the state and return the application Merkle root hash @@ -50,15 +50,15 @@ func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption { return ResponseSetOption{} } -func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx { +func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx { return ResponseDeliverTx{Code: CodeTypeOK} } -func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx { +func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx { return ResponseCheckTx{Code: CodeTypeOK} } -func (BaseApplication) ReCheckTx(tx []byte) ResponseCheckTx { +func (BaseApplication) ReCheckTx(req RequestCheckTx) ResponseCheckTx { return ResponseCheckTx{Code: CodeTypeOK} } @@ -120,12 +120,12 @@ func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption } func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { - res := app.app.DeliverTx(req.Tx) + res := app.app.DeliverTx(*req) return &res, nil } func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { - res := app.app.CheckTx(req.Tx) + res := app.app.CheckTx(*req) return &res, nil } diff --git a/abci/types/messages.go b/abci/types/messages.go index cb64a15d6..ad18727a8 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -93,15 +93,15 @@ func ToRequestSetOption(req RequestSetOption) *Request { } } -func ToRequestDeliverTx(tx []byte) *Request { +func ToRequestDeliverTx(req RequestDeliverTx) *Request { return &Request{ - Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}}, + Value: &Request_DeliverTx{&req}, } } -func ToRequestCheckTx(tx []byte) *Request { +func ToRequestCheckTx(req RequestCheckTx) *Request { return &Request{ - Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}}, + Value: &Request_CheckTx{&req}, } } diff --git a/abci/types/messages_test.go 
b/abci/types/messages_test.go index 762111b61..904b16410 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -8,6 +8,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" ) @@ -21,8 +22,13 @@ func TestMarshalJSON(t *testing.T) { Code: 1, Data: []byte("hello"), GasWanted: 43, - Tags: []cmn.KVPair{ - {Key: []byte("pho"), Value: []byte("bo")}, + Events: []Event{ + { + Type: "testEvent", + Attributes: []cmn.KVPair{ + {Key: []byte("pho"), Value: []byte("bo")}, + }, + }, }, } b, err = json.Marshal(&r1) @@ -82,8 +88,13 @@ func TestWriteReadMessage2(t *testing.T) { Data: []byte(phrase), Log: phrase, GasWanted: 10, - Tags: []cmn.KVPair{ - {Key: []byte("abc"), Value: []byte("def")}, + Events: []Event{ + { + Type: "testEvent", + Attributes: []cmn.KVPair{ + {Key: []byte("abc"), Value: []byte("def")}, + }, + }, }, }, // TODO: add the rest diff --git a/abci/types/protoreplace/protoreplace.go b/abci/types/protoreplace/protoreplace.go index 3ea0c73da..7058a70fb 100644 --- a/abci/types/protoreplace/protoreplace.go +++ b/abci/types/protoreplace/protoreplace.go @@ -40,7 +40,7 @@ func main() { } if writeImportTime && !wroteImport { wroteImport = true - fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n") + fmt.Fprintf(outFile, "import \"github.com/tendermint/go-amino/data\"\n") } if gotPackageLine { diff --git a/abci/types/snapshot.go b/abci/types/snapshot.go index 69cdd4a31..548158257 100644 --- a/abci/types/snapshot.go +++ b/abci/types/snapshot.go @@ -10,12 +10,12 @@ import ( ) const ( - ManifestVersion int32 = 0 - ChunkPayloadMaxBytes int = 4 * 1024 * 1024 // 4M before compression - snapshotDir string = "snapshot" - finalizedDir string = "current" - restorationDir string = "restoration" - manifestFileName string = "MANIFEST" + ManifestVersion int32 = 0 + ChunkPayloadMaxBytes int = 4 * 1024 * 1024 // 4M before compression + snapshotDir string = "snapshot" + finalizedDir string = "current" + restorationDir string = "restoration" + manifestFileName string = "MANIFEST" ) // AppStateChunk completeness enums diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 8e2f77f23..490253d2f 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -38,6 +38,29 @@ var _ = time.Kitchen // proto package needs to be updated. 
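// (Editor's illustrative sketch, not part of the generated file.) The
// CheckTxType enum introduced in this hunk lets an application distinguish
// a first-time CheckTx from a mempool recheck and skip redundant work.
// A minimal sketch, assuming hypothetical helpers fullCheck/recheck on a
// custom application type App:
//
//	func (app *App) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
//		if req.Type == types.CheckTxType_Recheck {
//			// the tx already passed full validation once; re-run only the
//			// state-dependent checks (nonce, balance, ...)
//			return app.recheck(req.Tx)
//		}
//		return app.fullCheck(req.Tx)
//	}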
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +type CheckTxType int32 + +const ( + CheckTxType_New CheckTxType = 0 + CheckTxType_Recheck CheckTxType = 1 +) + +var CheckTxType_name = map[int32]string{ + 0: "New", + 1: "Recheck", +} +var CheckTxType_value = map[string]int32{ + "New": 0, + "Recheck": 1, +} + +func (x CheckTxType) String() string { + return proto.EnumName(CheckTxType_name, int32(x)) +} +func (CheckTxType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{0} +} + type Request struct { // Types that are valid to be assigned to Value: // *Request_Echo @@ -61,7 +84,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{0} + return fileDescriptor_types_765aa6431c012acb, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +506,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{1} + return fileDescriptor_types_765aa6431c012acb, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +552,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{2} + return fileDescriptor_types_765aa6431c012acb, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -571,7 +594,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{3} + return fileDescriptor_types_765aa6431c012acb, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +657,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{4} + return fileDescriptor_types_765aa6431c012acb, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -692,7 +715,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{5} + return fileDescriptor_types_765aa6431c012acb, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -770,7 +793,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, 
[]int{6} + return fileDescriptor_types_765aa6431c012acb, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -841,7 +864,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{7} + return fileDescriptor_types_765aa6431c012acb, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -899,17 +922,18 @@ func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { } type RequestCheckTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=types.CheckTxType" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{8} + return fileDescriptor_types_765aa6431c012acb, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -945,6 +969,13 @@ func (m *RequestCheckTx) GetTx() []byte { return nil } +func (m *RequestCheckTx) GetType() CheckTxType { + if m != nil { + return m.Type + } + return CheckTxType_New +} + type RequestDeliverTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -956,7 +987,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{9} + return fileDescriptor_types_765aa6431c012acb, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1034,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{10} + return fileDescriptor_types_765aa6431c012acb, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1049,7 +1080,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{11} + return fileDescriptor_types_765aa6431c012acb, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1102,7 +1133,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{12} + return fileDescriptor_types_765aa6431c012acb, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1555,7 +1586,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{13} + return fileDescriptor_types_765aa6431c012acb, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1602,7 +1633,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{14} + return fileDescriptor_types_765aa6431c012acb, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1679,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{15} + return fileDescriptor_types_765aa6431c012acb, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1692,7 +1723,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{16} + return fileDescriptor_types_765aa6431c012acb, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1771,7 +1802,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{17} + return fileDescriptor_types_765aa6431c012acb, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1833,7 +1864,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{18} + return fileDescriptor_types_765aa6431c012acb, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1896,7 +1927,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{19} + return fileDescriptor_types_765aa6431c012acb, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1989,17 +2020,17 @@ func (m *ResponseQuery) GetCodespace() string { } type ResponseBeginBlock struct { - Tags []common.KVPair `protobuf:"bytes,1,rep,name=tags" 
json:"tags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Events []Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{20} + return fileDescriptor_types_765aa6431c012acb, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2028,32 +2059,79 @@ func (m *ResponseBeginBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo -func (m *ResponseBeginBlock) GetTags() []common.KVPair { +func (m *ResponseBeginBlock) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } -type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +type ResponseBeginBlockDeprecated struct { + Tags []common.KVPair `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } +func (m *ResponseBeginBlockDeprecated) Reset() { *m = ResponseBeginBlockDeprecated{} } +func (m *ResponseBeginBlockDeprecated) String() string { return proto.CompactTextString(m) } +func (*ResponseBeginBlockDeprecated) ProtoMessage() {} +func (*ResponseBeginBlockDeprecated) Descriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{21} +} +func (m *ResponseBeginBlockDeprecated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBeginBlockDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBeginBlockDeprecated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseBeginBlockDeprecated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBeginBlockDeprecated.Merge(dst, src) +} +func (m *ResponseBeginBlockDeprecated) XXX_Size() int { + return m.Size() +} +func (m *ResponseBeginBlockDeprecated) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBeginBlockDeprecated.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBeginBlockDeprecated proto.InternalMessageInfo + +func (m *ResponseBeginBlockDeprecated) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +type ResponseCheckTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte 
`protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{21} + return fileDescriptor_types_765aa6431c012acb, []int{22} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,9 +2202,9 @@ func (m *ResponseCheckTx) GetGasUsed() int64 { return 0 } -func (m *ResponseCheckTx) GetTags() []common.KVPair { +func (m *ResponseCheckTx) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } @@ -2138,7 +2216,7 @@ func (m *ResponseCheckTx) GetCodespace() string { return "" } -type ResponseDeliverTx struct { +type ResponseCheckTxDeprecated struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` @@ -2152,11 +2230,114 @@ type ResponseDeliverTx struct { XXX_sizecache int32 `json:"-"` } +func (m *ResponseCheckTxDeprecated) Reset() { *m = ResponseCheckTxDeprecated{} } +func (m *ResponseCheckTxDeprecated) String() string { return proto.CompactTextString(m) } +func (*ResponseCheckTxDeprecated) ProtoMessage() {} +func (*ResponseCheckTxDeprecated) Descriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{23} +} +func (m *ResponseCheckTxDeprecated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseCheckTxDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseCheckTxDeprecated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseCheckTxDeprecated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCheckTxDeprecated.Merge(dst, src) +} +func (m *ResponseCheckTxDeprecated) XXX_Size() int { + return m.Size() +} +func (m *ResponseCheckTxDeprecated) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCheckTxDeprecated.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseCheckTxDeprecated proto.InternalMessageInfo + +func (m *ResponseCheckTxDeprecated) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseCheckTxDeprecated) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseCheckTxDeprecated) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseCheckTxDeprecated) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseCheckTxDeprecated) GetGasWanted() int64 { + if m 
!= nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseCheckTxDeprecated) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseCheckTxDeprecated) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +func (m *ResponseCheckTxDeprecated) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + +type ResponseDeliverTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{22} + return fileDescriptor_types_765aa6431c012acb, []int{24} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2227,9 +2408,9 @@ func (m *ResponseDeliverTx) GetGasUsed() int64 { return 0 } -func (m *ResponseDeliverTx) GetTags() []common.KVPair { +func (m *ResponseDeliverTx) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } @@ -2241,10 +2422,113 @@ func (m *ResponseDeliverTx) GetCodespace() string { return "" } +type ResponseDeliverTxDeprecated struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseDeliverTxDeprecated) Reset() { *m = ResponseDeliverTxDeprecated{} } +func (m *ResponseDeliverTxDeprecated) String() string { return proto.CompactTextString(m) } +func (*ResponseDeliverTxDeprecated) ProtoMessage() {} +func (*ResponseDeliverTxDeprecated) Descriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{25} +} +func (m *ResponseDeliverTxDeprecated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseDeliverTxDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseDeliverTxDeprecated.Marshal(b, m, deterministic) 
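// (Added note, not part of the patch: when deterministic is true, encoding
// is delegated to the reflection-based proto marshaler, which is expected
// to produce byte-stable output, e.g. map fields in sorted key order; the
// else branch below uses the generated MarshalTo fast path.)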
+ } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseDeliverTxDeprecated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDeliverTxDeprecated.Merge(dst, src) +} +func (m *ResponseDeliverTxDeprecated) XXX_Size() int { + return m.Size() +} +func (m *ResponseDeliverTxDeprecated) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDeliverTxDeprecated.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseDeliverTxDeprecated proto.InternalMessageInfo + +func (m *ResponseDeliverTxDeprecated) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *ResponseDeliverTxDeprecated) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ResponseDeliverTxDeprecated) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *ResponseDeliverTxDeprecated) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *ResponseDeliverTxDeprecated) GetGasWanted() int64 { + if m != nil { + return m.GasWanted + } + return 0 +} + +func (m *ResponseDeliverTxDeprecated) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 +} + +func (m *ResponseDeliverTxDeprecated) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +func (m *ResponseDeliverTxDeprecated) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseEndBlock struct { ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` - Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events" json:"events,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2254,7 +2538,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{23} + return fileDescriptor_types_765aa6431c012acb, []int{26} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2297,33 +2581,34 @@ func (m *ResponseEndBlock) GetConsensusParamUpdates() *ConsensusParams { return nil } -func (m *ResponseEndBlock) GetTags() []common.KVPair { +func (m *ResponseEndBlock) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } -type ResponseCommit struct { - // reserve 1 - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ResponseEndBlockDeprecated struct { + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` + ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` + Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` } -func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } -func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } -func (*ResponseCommit) ProtoMessage() {} -func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{24} +func (m *ResponseEndBlockDeprecated) Reset() { *m = ResponseEndBlockDeprecated{} } +func (m *ResponseEndBlockDeprecated) String() string { return proto.CompactTextString(m) } +func (*ResponseEndBlockDeprecated) ProtoMessage() {} +func (*ResponseEndBlockDeprecated) Descriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{27} } -func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { +func (m *ResponseEndBlockDeprecated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseEndBlockDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseEndBlockDeprecated.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2333,17 +2618,79 @@ func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (dst *ResponseCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCommit.Merge(dst, src) +func (dst *ResponseEndBlockDeprecated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEndBlockDeprecated.Merge(dst, src) } -func (m *ResponseCommit) XXX_Size() int { +func (m *ResponseEndBlockDeprecated) XXX_Size() int { return m.Size() } -func (m *ResponseCommit) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCommit.DiscardUnknown(m) +func (m *ResponseEndBlockDeprecated) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEndBlockDeprecated.DiscardUnknown(m) } -var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo +var xxx_messageInfo_ResponseEndBlockDeprecated proto.InternalMessageInfo + +func (m *ResponseEndBlockDeprecated) GetValidatorUpdates() []ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ResponseEndBlockDeprecated) GetConsensusParamUpdates() *ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates + } + return nil +} + +func (m *ResponseEndBlockDeprecated) GetTags() []common.KVPair { + if m != nil { + return m.Tags + } + return nil +} + +type ResponseCommit struct { + // reserve 1 + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } +func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } +func (*ResponseCommit) ProtoMessage() {} +func (*ResponseCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{28} +} +func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCommit.Merge(dst, src) +} +func (m *ResponseCommit) 
XXX_Size() int { + return m.Size() +} +func (m *ResponseCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCommit.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo func (m *ResponseCommit) GetData() []byte { if m != nil { @@ -2367,7 +2714,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{25} + return fileDescriptor_types_765aa6431c012acb, []int{29} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2417,7 +2764,6 @@ func (m *ConsensusParams) GetValidator() *ValidatorParams { return nil } -// BlockSize contains limits on the block size. type BlockSizeParams struct { // Note: must be greater than 0 MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` @@ -2432,7 +2778,7 @@ func (m *BlockSizeParams) Reset() { *m = BlockSizeParams{} } func (m *BlockSizeParams) String() string { return proto.CompactTextString(m) } func (*BlockSizeParams) ProtoMessage() {} func (*BlockSizeParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{26} + return fileDescriptor_types_765aa6431c012acb, []int{30} } func (m *BlockSizeParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2834,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{27} + return fileDescriptor_types_765aa6431c012acb, []int{31} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2536,7 +2882,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } func (*ValidatorParams) ProtoMessage() {} func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{28} + return fileDescriptor_types_765aa6431c012acb, []int{32} } func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2584,7 +2930,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{29} + return fileDescriptor_types_765aa6431c012acb, []int{33} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2627,6 +2973,61 @@ func (m *LastCommitInfo) GetVotes() []VoteInfo { return nil } +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []common.KVPair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_types_765aa6431c012acb, []int{34} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(dst, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Event) GetAttributes() []common.KVPair { + if m != nil { + return m.Attributes + } + return nil +} + type Header struct { // basic block info Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` @@ -2658,7 +3059,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{30} + return fileDescriptor_types_765aa6431c012acb, []int{35} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2811,7 +3212,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{31} + return fileDescriptor_types_765aa6431c012acb, []int{36} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2866,7 +3267,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{32} + return fileDescriptor_types_765aa6431c012acb, []int{37} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2921,7 +3322,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{33} + return fileDescriptor_types_765aa6431c012acb, []int{38} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2978,7 +3379,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{34} + return fileDescriptor_types_765aa6431c012acb, []int{39} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3034,7 +3435,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{35} + return fileDescriptor_types_765aa6431c012acb, []int{40} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3090,7 +3491,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() 
string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{36} + return fileDescriptor_types_765aa6431c012acb, []int{41} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3145,7 +3546,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{37} + return fileDescriptor_types_765aa6431c012acb, []int{42} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3203,7 +3604,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_dfa4953f824ab2aa, []int{38} + return fileDescriptor_types_765aa6431c012acb, []int{43} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3310,12 +3711,20 @@ func init() { golang_proto.RegisterType((*ResponseQuery)(nil), "types.ResponseQuery") proto.RegisterType((*ResponseBeginBlock)(nil), "types.ResponseBeginBlock") golang_proto.RegisterType((*ResponseBeginBlock)(nil), "types.ResponseBeginBlock") + proto.RegisterType((*ResponseBeginBlockDeprecated)(nil), "types.ResponseBeginBlockDeprecated") + golang_proto.RegisterType((*ResponseBeginBlockDeprecated)(nil), "types.ResponseBeginBlockDeprecated") proto.RegisterType((*ResponseCheckTx)(nil), "types.ResponseCheckTx") golang_proto.RegisterType((*ResponseCheckTx)(nil), "types.ResponseCheckTx") + proto.RegisterType((*ResponseCheckTxDeprecated)(nil), "types.ResponseCheckTxDeprecated") + golang_proto.RegisterType((*ResponseCheckTxDeprecated)(nil), "types.ResponseCheckTxDeprecated") proto.RegisterType((*ResponseDeliverTx)(nil), "types.ResponseDeliverTx") golang_proto.RegisterType((*ResponseDeliverTx)(nil), "types.ResponseDeliverTx") + proto.RegisterType((*ResponseDeliverTxDeprecated)(nil), "types.ResponseDeliverTxDeprecated") + golang_proto.RegisterType((*ResponseDeliverTxDeprecated)(nil), "types.ResponseDeliverTxDeprecated") proto.RegisterType((*ResponseEndBlock)(nil), "types.ResponseEndBlock") golang_proto.RegisterType((*ResponseEndBlock)(nil), "types.ResponseEndBlock") + proto.RegisterType((*ResponseEndBlockDeprecated)(nil), "types.ResponseEndBlockDeprecated") + golang_proto.RegisterType((*ResponseEndBlockDeprecated)(nil), "types.ResponseEndBlockDeprecated") proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") golang_proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") @@ -3328,6 +3737,8 @@ func init() { golang_proto.RegisterType((*ValidatorParams)(nil), "types.ValidatorParams") proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") golang_proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") + proto.RegisterType((*Event)(nil), "types.Event") + golang_proto.RegisterType((*Event)(nil), "types.Event") proto.RegisterType((*Header)(nil), "types.Header") golang_proto.RegisterType((*Header)(nil), "types.Header") proto.RegisterType((*Version)(nil), "types.Version") @@ -3346,6 +3757,8 @@ func init() { golang_proto.RegisterType((*PubKey)(nil), "types.PubKey") proto.RegisterType((*Evidence)(nil), "types.Evidence") 
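// (Added note, not part of the patch: every message is registered twice,
// once with the gogo/protobuf registry and once, via golang_proto, with the
// upstream golang/protobuf registry, so either runtime can resolve the
// "types.*" names; the new CheckTxType enum is registered the same way just
// below.)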
golang_proto.RegisterType((*Evidence)(nil), "types.Evidence") + proto.RegisterEnum("types.CheckTxType", CheckTxType_name, CheckTxType_value) + golang_proto.RegisterEnum("types.CheckTxType", CheckTxType_name, CheckTxType_value) } func (this *Request) Equal(that interface{}) bool { if that == nil { @@ -3901,6 +4314,9 @@ func (this *RequestCheckTx) Equal(that interface{}) bool { if !bytes.Equal(this.Tx, that1.Tx) { return false } + if this.Type != that1.Type { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4560,6 +4976,38 @@ func (this *ResponseBeginBlock) Equal(that interface{}) bool { } else if this == nil { return false } + if len(this.Events) != len(that1.Events) { + return false + } + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseBeginBlockDeprecated) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseBeginBlockDeprecated) + if !ok { + that2, ok := that.(ResponseBeginBlockDeprecated) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } if len(this.Tags) != len(that1.Tags) { return false } @@ -4610,6 +5058,59 @@ func (this *ResponseCheckTx) Equal(that interface{}) bool { if this.GasUsed != that1.GasUsed { return false } + if len(this.Events) != len(that1.Events) { + return false + } + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { + return false + } + } + if this.Codespace != that1.Codespace { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseCheckTxDeprecated) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseCheckTxDeprecated) + if !ok { + that2, ok := that.(ResponseCheckTxDeprecated) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if this.Log != that1.Log { + return false + } + if this.Info != that1.Info { + return false + } + if this.GasWanted != that1.GasWanted { + return false + } + if this.GasUsed != that1.GasUsed { + return false + } if len(this.Tags) != len(that1.Tags) { return false } @@ -4663,6 +5164,59 @@ func (this *ResponseDeliverTx) Equal(that interface{}) bool { if this.GasUsed != that1.GasUsed { return false } + if len(this.Events) != len(that1.Events) { + return false + } + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { + return false + } + } + if this.Codespace != that1.Codespace { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseDeliverTxDeprecated) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseDeliverTxDeprecated) + if !ok { + that2, ok := that.(ResponseDeliverTxDeprecated) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } 
+ if this.Log != that1.Log { + return false + } + if this.Info != that1.Info { + return false + } + if this.GasWanted != that1.GasWanted { + return false + } + if this.GasUsed != that1.GasUsed { + return false + } if len(this.Tags) != len(that1.Tags) { return false } @@ -4709,6 +5263,49 @@ func (this *ResponseEndBlock) Equal(that interface{}) bool { if !this.ConsensusParamUpdates.Equal(that1.ConsensusParamUpdates) { return false } + if len(this.Events) != len(that1.Events) { + return false + } + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseEndBlockDeprecated) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseEndBlockDeprecated) + if !ok { + that2, ok := that.(ResponseEndBlockDeprecated) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.ValidatorUpdates) != len(that1.ValidatorUpdates) { + return false + } + for i := range this.ValidatorUpdates { + if !this.ValidatorUpdates[i].Equal(&that1.ValidatorUpdates[i]) { + return false + } + } + if !this.ConsensusParamUpdates.Equal(that1.ConsensusParamUpdates) { + return false + } if len(this.Tags) != len(that1.Tags) { return false } @@ -4906,14 +5503,49 @@ func (this *LastCommitInfo) Equal(that interface{}) bool { } return true } -func (this *Header) Equal(that interface{}) bool { +func (this *Event) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*Header) + that1, ok := that.(*Event) if !ok { - that2, ok := that.(Header) + that2, ok := that.(Event) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Header) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Header) + if !ok { + that2, ok := that.(Header) if ok { that1 = &that2 } else { @@ -6119,6 +6751,11 @@ func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) i += copy(dAtA[i:], m.Tx) } + if m.Type != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6687,6 +7324,39 @@ func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { } func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResponseBeginBlockDeprecated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *ResponseBeginBlockDeprecated) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -6720,6 +7390,78 @@ func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { } func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) + } + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) + } + if m.GasWanted != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResponseCheckTxDeprecated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCheckTxDeprecated) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -6792,6 +7534,78 @@ func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { } func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Log) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i += copy(dAtA[i:], m.Log) + } + if len(m.Info) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i += copy(dAtA[i:], m.Info) + } + if m.GasWanted != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResponseDeliverTxDeprecated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseDeliverTxDeprecated) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -6890,6 +7704,61 @@ func (m *ResponseEndBlock) 
MarshalTo(dAtA []byte) (int, error) { } i += n32 } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResponseEndBlockDeprecated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEndBlockDeprecated) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, msg := range m.ValidatorUpdates { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ConsensusParamUpdates != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParamUpdates.Size())) + n33, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } if len(m.Tags) > 0 { for _, msg := range m.Tags { dAtA[i] = 0x1a @@ -6954,31 +7823,31 @@ func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize.Size())) - n33, err := m.BlockSize.MarshalTo(dAtA[i:]) + n34, err := m.BlockSize.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 } if m.Evidence != nil { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Evidence.Size())) - n34, err := m.Evidence.MarshalTo(dAtA[i:]) + n35, err := m.Evidence.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 } if m.Validator != nil { dAtA[i] = 0x1a i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n35, err := m.Validator.MarshalTo(dAtA[i:]) + n36, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -7117,7 +7986,46 @@ func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *Header) Marshal() (dAtA []byte, err error) { +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Attributes) > 0 { + for _, msg := range m.Attributes { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -7135,11 +8043,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Version.Size())) - n36, err := m.Version.MarshalTo(dAtA[i:]) + n37, err := m.Version.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 if len(m.ChainID) > 0 { dAtA[i] = 0x12 i++ @@ -7154,11 +8062,11 @@ 
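// Editor's note: the nNN renames in the hunks around here (n33 -> n34,
// n36 -> n37, and so on through n43 -> n44) are mechanical churn, not behavior
// changes. gogoproto numbers its temporary MarshalTo offset variables
// sequentially across the generated file, so inserting the new Event and
// *Deprecated messages above shifts every later counter by one.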
func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n37, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n38, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 if m.NumTxs != 0 { dAtA[i] = 0x28 i++ @@ -7172,11 +8080,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) - n38, err := m.LastBlockId.MarshalTo(dAtA[i:]) + n39, err := m.LastBlockId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if len(m.LastCommitHash) > 0 { dAtA[i] = 0x42 i++ @@ -7294,11 +8202,11 @@ func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) - n39, err := m.PartsHeader.MarshalTo(dAtA[i:]) + n40, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -7387,11 +8295,11 @@ func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n40, err := m.PubKey.MarshalTo(dAtA[i:]) + n41, err := m.PubKey.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if m.Power != 0 { dAtA[i] = 0x10 i++ @@ -7421,11 +8329,11 @@ func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n41, err := m.Validator.MarshalTo(dAtA[i:]) + n42, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n42 if m.SignedLastBlock { dAtA[i] = 0x10 i++ @@ -7499,11 +8407,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n42, err := m.Validator.MarshalTo(dAtA[i:]) + n43, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n43 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -7512,11 +8420,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n43, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n44, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n44 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -7741,8 +8649,9 @@ func NewPopulatedRequestCheckTx(r randyTypes, easy bool) *RequestCheckTx { for i := 0; i < v11; i++ { this.Tx[i] = byte(r.Intn(256)) } + this.Type = CheckTxType([]int32{0, 1}[r.Intn(2)]) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -7988,10 +8897,26 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock this := &ResponseBeginBlock{} if r.Intn(10) != 0 { v18 := r.Intn(5) - this.Tags = make([]common.KVPair, v18) + this.Events = make([]Event, v18) for i := 0; i < v18; i++ { - v19 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v19 + v19 := NewPopulatedEvent(r, easy) + this.Events[i] = *v19 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + } + return this +} + +func 
NewPopulatedResponseBeginBlockDeprecated(r randyTypes, easy bool) *ResponseBeginBlockDeprecated { + this := &ResponseBeginBlockDeprecated{} + if r.Intn(10) != 0 { + v20 := r.Intn(5) + this.Tags = make([]common.KVPair, v20) + for i := 0; i < v20; i++ { + v21 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v21 } } if !easy && r.Intn(10) != 0 { @@ -8003,9 +8928,42 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this := &ResponseCheckTx{} this.Code = uint32(r.Uint32()) - v20 := r.Intn(100) - this.Data = make([]byte, v20) - for i := 0; i < v20; i++ { + v22 := r.Intn(100) + this.Data = make([]byte, v22) + for i := 0; i < v22; i++ { + this.Data[i] = byte(r.Intn(256)) + } + this.Log = string(randStringTypes(r)) + this.Info = string(randStringTypes(r)) + this.GasWanted = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasWanted *= -1 + } + this.GasUsed = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasUsed *= -1 + } + if r.Intn(10) != 0 { + v23 := r.Intn(5) + this.Events = make([]Event, v23) + for i := 0; i < v23; i++ { + v24 := NewPopulatedEvent(r, easy) + this.Events[i] = *v24 + } + } + this.Codespace = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 9) + } + return this +} + +func NewPopulatedResponseCheckTxDeprecated(r randyTypes, easy bool) *ResponseCheckTxDeprecated { + this := &ResponseCheckTxDeprecated{} + this.Code = uint32(r.Uint32()) + v25 := r.Intn(100) + this.Data = make([]byte, v25) + for i := 0; i < v25; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -8019,11 +8977,11 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v21 := r.Intn(5) - this.Tags = make([]common.KVPair, v21) - for i := 0; i < v21; i++ { - v22 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v22 + v26 := r.Intn(5) + this.Tags = make([]common.KVPair, v26) + for i := 0; i < v26; i++ { + v27 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v27 } } this.Codespace = string(randStringTypes(r)) @@ -8036,9 +8994,42 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this := &ResponseDeliverTx{} this.Code = uint32(r.Uint32()) - v23 := r.Intn(100) - this.Data = make([]byte, v23) - for i := 0; i < v23; i++ { + v28 := r.Intn(100) + this.Data = make([]byte, v28) + for i := 0; i < v28; i++ { + this.Data[i] = byte(r.Intn(256)) + } + this.Log = string(randStringTypes(r)) + this.Info = string(randStringTypes(r)) + this.GasWanted = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasWanted *= -1 + } + this.GasUsed = int64(r.Int63()) + if r.Intn(2) == 0 { + this.GasUsed *= -1 + } + if r.Intn(10) != 0 { + v29 := r.Intn(5) + this.Events = make([]Event, v29) + for i := 0; i < v29; i++ { + v30 := NewPopulatedEvent(r, easy) + this.Events[i] = *v30 + } + } + this.Codespace = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 9) + } + return this +} + +func NewPopulatedResponseDeliverTxDeprecated(r randyTypes, easy bool) *ResponseDeliverTxDeprecated { + this := &ResponseDeliverTxDeprecated{} + this.Code = uint32(r.Uint32()) + v31 := r.Intn(100) + this.Data = make([]byte, v31) + for i := 0; i < v31; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ 
-8052,11 +9043,11 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v24 := r.Intn(5) - this.Tags = make([]common.KVPair, v24) - for i := 0; i < v24; i++ { - v25 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v25 + v32 := r.Intn(5) + this.Tags = make([]common.KVPair, v32) + for i := 0; i < v32; i++ { + v33 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v33 } } this.Codespace = string(randStringTypes(r)) @@ -8069,22 +9060,49 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} if r.Intn(10) != 0 { - v26 := r.Intn(5) - this.ValidatorUpdates = make([]ValidatorUpdate, v26) - for i := 0; i < v26; i++ { - v27 := NewPopulatedValidatorUpdate(r, easy) - this.ValidatorUpdates[i] = *v27 + v34 := r.Intn(5) + this.ValidatorUpdates = make([]ValidatorUpdate, v34) + for i := 0; i < v34; i++ { + v35 := NewPopulatedValidatorUpdate(r, easy) + this.ValidatorUpdates[i] = *v35 + } + } + if r.Intn(10) != 0 { + this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) + } + if r.Intn(10) != 0 { + v36 := r.Intn(5) + this.Events = make([]Event, v36) + for i := 0; i < v36; i++ { + v37 := NewPopulatedEvent(r, easy) + this.Events[i] = *v37 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + } + return this +} + +func NewPopulatedResponseEndBlockDeprecated(r randyTypes, easy bool) *ResponseEndBlockDeprecated { + this := &ResponseEndBlockDeprecated{} + if r.Intn(10) != 0 { + v38 := r.Intn(5) + this.ValidatorUpdates = make([]ValidatorUpdate, v38) + for i := 0; i < v38; i++ { + v39 := NewPopulatedValidatorUpdate(r, easy) + this.ValidatorUpdates[i] = *v39 } } if r.Intn(10) != 0 { this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) } if r.Intn(10) != 0 { - v28 := r.Intn(5) - this.Tags = make([]common.KVPair, v28) - for i := 0; i < v28; i++ { - v29 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v29 + v40 := r.Intn(5) + this.Tags = make([]common.KVPair, v40) + for i := 0; i < v40; i++ { + v41 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v41 } } if !easy && r.Intn(10) != 0 { @@ -8095,9 +9113,9 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { this := &ResponseCommit{} - v30 := r.Intn(100) - this.Data = make([]byte, v30) - for i := 0; i < v30; i++ { + v42 := r.Intn(100) + this.Data = make([]byte, v42) + for i := 0; i < v42; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8153,9 +9171,9 @@ func NewPopulatedEvidenceParams(r randyTypes, easy bool) *EvidenceParams { func NewPopulatedValidatorParams(r randyTypes, easy bool) *ValidatorParams { this := &ValidatorParams{} - v31 := r.Intn(10) - this.PubKeyTypes = make([]string, v31) - for i := 0; i < v31; i++ { + v43 := r.Intn(10) + this.PubKeyTypes = make([]string, v43) + for i := 0; i < v43; i++ { this.PubKeyTypes[i] = string(randStringTypes(r)) } if !easy && r.Intn(10) != 0 { @@ -8171,11 +9189,28 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this.Round *= -1 } if r.Intn(10) != 0 { - v32 := r.Intn(5) - this.Votes = make([]VoteInfo, v32) - for i := 0; i < v32; i++ { - v33 := NewPopulatedVoteInfo(r, easy) - this.Votes[i] = *v33 + v44 := r.Intn(5) + this.Votes = make([]VoteInfo, v44) + for i := 0; i < v44; i++ { 
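// Editor's note: the NewPopulated* helpers in this region come from gogoproto's
// populate plugin; they fill each message with random field values so the
// generated tests can fuzz the Marshal/Unmarshal/Equal round trip. Conceptually
// (hypothetical usage, not part of this file):
//
//	ev := NewPopulatedEvent(r, false) // r is a randyTypes source
//	bz, _ := ev.Marshal()
//	out := &Event{}
//	_ = out.Unmarshal(bz) // out.Equal(ev) should then hold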
+ v45 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v45 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedEvent(r randyTypes, easy bool) *Event { + this := &Event{} + this.Type = string(randStringTypes(r)) + if r.Intn(10) != 0 { + v46 := r.Intn(5) + this.Attributes = make([]common.KVPair, v46) + for i := 0; i < v46; i++ { + v47 := common.NewPopulatedKVPair(r, easy) + this.Attributes[i] = *v47 } } if !easy && r.Intn(10) != 0 { @@ -8186,15 +9221,15 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { func NewPopulatedHeader(r randyTypes, easy bool) *Header { this := &Header{} - v34 := NewPopulatedVersion(r, easy) - this.Version = *v34 + v48 := NewPopulatedVersion(r, easy) + this.Version = *v48 this.ChainID = string(randStringTypes(r)) this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v35 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v35 + v49 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v49 this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 @@ -8203,51 +9238,51 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v36 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v36 - v37 := r.Intn(100) - this.LastCommitHash = make([]byte, v37) - for i := 0; i < v37; i++ { + v50 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v50 + v51 := r.Intn(100) + this.LastCommitHash = make([]byte, v51) + for i := 0; i < v51; i++ { this.LastCommitHash[i] = byte(r.Intn(256)) } - v38 := r.Intn(100) - this.DataHash = make([]byte, v38) - for i := 0; i < v38; i++ { + v52 := r.Intn(100) + this.DataHash = make([]byte, v52) + for i := 0; i < v52; i++ { this.DataHash[i] = byte(r.Intn(256)) } - v39 := r.Intn(100) - this.ValidatorsHash = make([]byte, v39) - for i := 0; i < v39; i++ { + v53 := r.Intn(100) + this.ValidatorsHash = make([]byte, v53) + for i := 0; i < v53; i++ { this.ValidatorsHash[i] = byte(r.Intn(256)) } - v40 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v40) - for i := 0; i < v40; i++ { + v54 := r.Intn(100) + this.NextValidatorsHash = make([]byte, v54) + for i := 0; i < v54; i++ { this.NextValidatorsHash[i] = byte(r.Intn(256)) } - v41 := r.Intn(100) - this.ConsensusHash = make([]byte, v41) - for i := 0; i < v41; i++ { + v55 := r.Intn(100) + this.ConsensusHash = make([]byte, v55) + for i := 0; i < v55; i++ { this.ConsensusHash[i] = byte(r.Intn(256)) } - v42 := r.Intn(100) - this.AppHash = make([]byte, v42) - for i := 0; i < v42; i++ { + v56 := r.Intn(100) + this.AppHash = make([]byte, v56) + for i := 0; i < v56; i++ { this.AppHash[i] = byte(r.Intn(256)) } - v43 := r.Intn(100) - this.LastResultsHash = make([]byte, v43) - for i := 0; i < v43; i++ { + v57 := r.Intn(100) + this.LastResultsHash = make([]byte, v57) + for i := 0; i < v57; i++ { this.LastResultsHash[i] = byte(r.Intn(256)) } - v44 := r.Intn(100) - this.EvidenceHash = make([]byte, v44) - for i := 0; i < v44; i++ { + v58 := r.Intn(100) + this.EvidenceHash = make([]byte, v58) + for i := 0; i < v58; i++ { this.EvidenceHash[i] = byte(r.Intn(256)) } - v45 := r.Intn(100) - this.ProposerAddress = make([]byte, v45) - for i := 0; i < v45; i++ { + v59 := r.Intn(100) + this.ProposerAddress = make([]byte, v59) + for i := 0; i < v59; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8268,13 +9303,13 @@ func NewPopulatedVersion(r randyTypes, 
easy bool) *Version { func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v46 := r.Intn(100) - this.Hash = make([]byte, v46) - for i := 0; i < v46; i++ { + v60 := r.Intn(100) + this.Hash = make([]byte, v60) + for i := 0; i < v60; i++ { this.Hash[i] = byte(r.Intn(256)) } - v47 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v47 + v61 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v61 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -8287,9 +9322,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v48 := r.Intn(100) - this.Hash = make([]byte, v48) - for i := 0; i < v48; i++ { + v62 := r.Intn(100) + this.Hash = make([]byte, v62) + for i := 0; i < v62; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8300,9 +9335,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v49 := r.Intn(100) - this.Address = make([]byte, v49) - for i := 0; i < v49; i++ { + v63 := r.Intn(100) + this.Address = make([]byte, v63) + for i := 0; i < v63; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -8317,8 +9352,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v50 := NewPopulatedPubKey(r, easy) - this.PubKey = *v50 + v64 := NewPopulatedPubKey(r, easy) + this.PubKey = *v64 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -8331,8 +9366,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v51 := NewPopulatedValidator(r, easy) - this.Validator = *v51 + v65 := NewPopulatedValidator(r, easy) + this.Validator = *v65 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -8343,9 +9378,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v52 := r.Intn(100) - this.Data = make([]byte, v52) - for i := 0; i < v52; i++ { + v66 := r.Intn(100) + this.Data = make([]byte, v66) + for i := 0; i < v66; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8357,14 +9392,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v53 := NewPopulatedValidator(r, easy) - this.Validator = *v53 + v67 := NewPopulatedValidator(r, easy) + this.Validator = *v67 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v54 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v54 + v68 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v68 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -8394,9 +9429,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v55 := r.Intn(100) - tmps := make([]rune, v55) - for i := 0; i < v55; i++ { + v69 := r.Intn(100) + tmps := make([]rune, v69) + for i := 0; i < v69; i++ { tmps[i] = randUTF8RuneTypes(r) } return 
string(tmps) @@ -8418,11 +9453,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v56 := r.Int63() + v70 := r.Int63() if r.Intn(2) == 0 { - v56 *= -1 + v70 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v56)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v70)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -8758,6 +9793,9 @@ func (m *RequestCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -9131,6 +10169,24 @@ func (m *ResponseQuery) Size() (n int) { } func (m *ResponseBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseBeginBlockDeprecated) Size() (n int) { if m == nil { return 0 } @@ -9175,8 +10231,8 @@ func (m *ResponseCheckTx) Size() (n int) { if m.GasUsed != 0 { n += 1 + sovTypes(uint64(m.GasUsed)) } - if len(m.Tags) > 0 { - for _, e := range m.Tags { + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9191,7 +10247,7 @@ func (m *ResponseCheckTx) Size() (n int) { return n } -func (m *ResponseDeliverTx) Size() (n int) { +func (m *ResponseCheckTxDeprecated) Size() (n int) { if m == nil { return 0 } @@ -9234,66 +10290,84 @@ func (m *ResponseDeliverTx) Size() (n int) { return n } -func (m *ResponseEndBlock) Size() (n int) { +func (m *ResponseDeliverTx) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.ValidatorUpdates) > 0 { - for _, e := range m.ValidatorUpdates { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) } - if m.ConsensusParamUpdates != nil { - l = m.ConsensusParamUpdates.Size() + l = len(m.Data) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if len(m.Tags) > 0 { - for _, e := range m.Tags { + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } -func (m *ResponseCommit) Size() (n int) { +func (m *ResponseDeliverTxDeprecated) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } l = len(m.Data) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConsensusParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockSize != nil { - l = m.BlockSize.Size() + l = len(m.Log) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.Evidence != nil { - l = m.Evidence.Size() + l = len(m.Info) + if l > 0 { n += 1 + l + 
sovTypes(uint64(l)) } - if m.Validator != nil { - l = m.Validator.Size() + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } if m.XXX_unrecognized != nil { @@ -9302,16 +10376,112 @@ func (m *ConsensusParams) Size() (n int) { return n } -func (m *BlockSizeParams) Size() (n int) { +func (m *ResponseEndBlock) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.MaxBytes != 0 { - n += 1 + sovTypes(uint64(m.MaxBytes)) - } - if m.MaxGas != 0 { + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseEndBlockDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConsensusParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockSize != nil { + l = m.BlockSize.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlockSizeParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { n += 1 + sovTypes(uint64(m.MaxGas)) } if m.XXX_unrecognized != nil { @@ -9374,6 +10544,28 @@ func (m *LastCommitInfo) Size() (n int) { return n } +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Header) Size() (n int) { if m == nil { return 0 @@ -10957,6 +12149,25 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { m.Tx = []byte{} } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
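// Annotation: this loop is standard protobuf base-128 varint decoding. Each
// byte contributes its low 7 bits, least-significant group first, and a set
// high bit (0x80) means another byte follows. The matching encoder emits the
// tag byte 0x10 first (field number 2 << 3 | wire type 0), which is why this
// value is decoded under "case 2" with wireType 0.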
+ iNdEx++ + m.Type |= (CheckTxType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12568,6 +13779,88 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBeginBlockDeprecated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBeginBlockDeprecated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseBeginBlockDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) @@ -12798,7 +14091,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12822,8 +14115,8 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, common.KVPair{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12878,7 +14171,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { +func (m *ResponseCheckTxDeprecated) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12901,10 +14194,10 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseCheckTxDeprecated: wiretype end group for 
non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseCheckTxDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13135,7 +14428,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { +func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13158,17 +14451,17 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) } - var msglen int + m.Code = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13178,28 +14471,16 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.Code |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) - if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13209,20 +14490,692 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &ConsensusParams{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseDeliverTxDeprecated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseDeliverTxDeprecated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseDeliverTxDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, common.KVPair{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
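// Annotation: unknown fields are preserved rather than dropped. skipTypes
// measured the full encoded field above, its raw bytes are appended to
// XXX_unrecognized, and Marshal later writes them back out verbatim, so fields
// added by newer schema revisions survive a decode/re-encode round trip here.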
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseEndBlockDeprecated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEndBlockDeprecated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEndBlockDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &ConsensusParams{} } if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13853,6 +15806,117 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, common.KVPair{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Header) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -15357,150 +17421,161 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_dfa4953f824ab2aa) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_765aa6431c012acb) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_dfa4953f824ab2aa) -} - -var fileDescriptor_types_dfa4953f824ab2aa = []byte{ - // 2214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x73, 0x1b, 0xc7, - 0xd1, 0xe7, 0x82, 0x20, 0x81, 0x6d, 0x10, 0x0f, 0x8d, 0x28, 0x09, 0xc2, 0xe7, 0x8f, 0x54, 0xad, - 0x12, 0x5b, 0x8c, 0x65, 0xd0, 0xa6, 0xa3, 0x14, 0x65, 0x39, 0xa9, 0x22, 0x24, 0xc5, 0x64, 0xd9, - 0x49, 0x98, 0x95, 0xc4, 0x5c, 0x52, 0xb5, 0x35, 0xc0, 0x8e, 0x80, 0x2d, 0x02, 0xbb, 0xeb, 0xdd, - 0x01, 0x0d, 0xea, 0x98, 0xb3, 0x0f, 0x3e, 0xe4, 0x8f, 0xc8, 0x35, 0x37, 0x1f, 0x73, 0x4a, 0xf9, - 0x98, 0x43, 0xce, 0x4a, 0xc2, 0x54, 0x0e, 0xc9, 0x35, 0x95, 0xaa, 0x1c, 0x53, 0xd3, 0x33, 0xb3, - 0x2f, 0x2e, 0x14, 0xcb, 0xc9, 0x29, 0x17, 0x60, 0xa6, 0x1f, 0xf3, 0xe8, 0xed, 0xee, 0x5f, 0xf7, - 0xc0, 0x75, 0x3a, 0x1c, 0x79, 0xbb, 0xfc, 0x3c, 0x64, 0xb1, 0xfc, 0xed, 0x87, 0x51, 0xc0, 0x03, - 0xb2, 0x86, 0x93, 0xde, 0x3b, 0x63, 0x8f, 0x4f, 0xe6, 0xc3, 0xfe, 0x28, 0x98, 0xed, 0x8e, 0x83, - 0x71, 0xb0, 0x8b, 0xdc, 0xe1, 0xfc, 0x39, 0xce, 0x70, 0x82, 0x23, 0xa9, 0xd5, 0xdb, 0x1e, 0x07, - 0xc1, 0x78, 0xca, 0x52, 0x29, 0xee, 0xcd, 0x58, 0xcc, 0xe9, 0x2c, 0x54, 0x02, 0xfb, 0x99, 0xf5, - 0x38, 0xf3, 0x5d, 0x16, 0xcd, 0x3c, 0x9f, 0x67, 0x87, 0x53, 0x6f, 0x18, 0xef, 0x8e, 0x82, 0xd9, - 0x2c, 0xf0, 0xb3, 0x07, 0xea, 0x3d, 0xf8, 0xb7, 0x9a, 0xa3, 0xe8, 0x3c, 0xe4, 0xc1, 0xee, 0x8c, - 0x45, 0xa7, 0x53, 0xa6, 0xfe, 0xa4, 0xb2, 0xf5, 0xdb, 0x2a, 0xd4, 0x6c, 0xf6, 0xe9, 0x9c, 0xc5, - 0x9c, 0xdc, 0x81, 0x2a, 0x1b, 0x4d, 0x82, 0x6e, 0xe5, 0x96, 0x71, 0xa7, 0xb1, 0x47, 0xfa, 0x72, - 0x13, 0xc5, 0x7d, 0x3c, 0x9a, 0x04, 0x87, 0x2b, 0x36, 0x4a, 0x90, 0xb7, 0x61, 0xed, 0xf9, 0x74, - 0x1e, 0x4f, 0xba, 0xab, 0x28, 0x7a, 0x35, 0x2f, 0xfa, 0x43, 0xc1, 0x3a, 0x5c, 0xb1, 0xa5, 0x8c, - 0x58, 0xd6, 0xf3, 0x9f, 0x07, 0xdd, 0x6a, 0xd9, 0xb2, 0x47, 0xfe, 0x73, 0x5c, 0x56, 0x48, 0x90, - 0x7d, 0x80, 0x98, 0x71, 0x27, 
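// Editor's note: the hex dump surrounding this point is the old gzipped
// FileDescriptorProto (fileDescriptor_types_dfa4953f824ab2aa) being deleted;
// the init() calls above now register fileDescriptor_types_765aa6431c012acb
// instead. The descriptor bytes change whenever types.proto changes (here:
// Event, CheckTxType, and the *Deprecated messages), so the entire opaque
// byte array shows up as rewritten and can be skipped during review.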
0x08, 0xb9, 0x17, 0xf8, 0xdd, 0x35, 0x94, 0xbf, 0x91, 0x97, 0x7f, - 0xc2, 0xf8, 0x4f, 0x90, 0x7d, 0xb8, 0x62, 0x9b, 0xb1, 0x9e, 0x08, 0x4d, 0xcf, 0xf7, 0xb8, 0x33, - 0x9a, 0x50, 0xcf, 0xef, 0xae, 0x97, 0x69, 0x1e, 0xf9, 0x1e, 0x7f, 0x28, 0xd8, 0x42, 0xd3, 0xd3, - 0x13, 0x71, 0x95, 0x4f, 0xe7, 0x2c, 0x3a, 0xef, 0xd6, 0xca, 0xae, 0xf2, 0x53, 0xc1, 0x12, 0x57, - 0x41, 0x19, 0xf2, 0x00, 0x1a, 0x43, 0x36, 0xf6, 0x7c, 0x67, 0x38, 0x0d, 0x46, 0xa7, 0xdd, 0x3a, - 0xaa, 0x74, 0xf3, 0x2a, 0x03, 0x21, 0x30, 0x10, 0xfc, 0xc3, 0x15, 0x1b, 0x86, 0xc9, 0x8c, 0xec, - 0x41, 0x7d, 0x34, 0x61, 0xa3, 0x53, 0x87, 0x2f, 0xba, 0x26, 0x6a, 0x5e, 0xcb, 0x6b, 0x3e, 0x14, - 0xdc, 0xa7, 0x8b, 0xc3, 0x15, 0xbb, 0x36, 0x92, 0x43, 0x72, 0x0f, 0x4c, 0xe6, 0xbb, 0x6a, 0xbb, - 0x06, 0x2a, 0x5d, 0x2f, 0x7c, 0x17, 0xdf, 0xd5, 0x9b, 0xd5, 0x99, 0x1a, 0x93, 0x3e, 0xac, 0x0b, - 0x47, 0xf1, 0x78, 0x77, 0x03, 0x75, 0x36, 0x0b, 0x1b, 0x21, 0xef, 0x70, 0xc5, 0x56, 0x52, 0xc2, - 0x7c, 0x2e, 0x9b, 0x7a, 0x67, 0x2c, 0x12, 0x87, 0xbb, 0x5a, 0x66, 0xbe, 0x47, 0x92, 0x8f, 0xc7, - 0x33, 0x5d, 0x3d, 0x19, 0xd4, 0x60, 0xed, 0x8c, 0x4e, 0xe7, 0xcc, 0x7a, 0x0b, 0x1a, 0x19, 0x4f, - 0x21, 0x5d, 0xa8, 0xcd, 0x58, 0x1c, 0xd3, 0x31, 0xeb, 0x1a, 0xb7, 0x8c, 0x3b, 0xa6, 0xad, 0xa7, - 0x56, 0x0b, 0x36, 0xb2, 0x7e, 0x62, 0xcd, 0x12, 0x45, 0xe1, 0x0b, 0x42, 0xf1, 0x8c, 0x45, 0xb1, - 0x70, 0x00, 0xa5, 0xa8, 0xa6, 0xe4, 0x36, 0x34, 0xd1, 0x0e, 0x8e, 0xe6, 0x0b, 0x3f, 0xad, 0xda, - 0x1b, 0x48, 0x3c, 0x51, 0x42, 0xdb, 0xd0, 0x08, 0xf7, 0xc2, 0x44, 0x64, 0x15, 0x45, 0x20, 0xdc, - 0x0b, 0x95, 0x80, 0xf5, 0x01, 0x74, 0x8a, 0xae, 0x44, 0x3a, 0xb0, 0x7a, 0xca, 0xce, 0xd5, 0x7e, - 0x62, 0x48, 0x36, 0xd5, 0xb5, 0x70, 0x0f, 0xd3, 0x56, 0x77, 0xfc, 0xa2, 0x92, 0x28, 0x27, 0xde, - 0x44, 0xf6, 0xa1, 0x2a, 0x62, 0x19, 0xb5, 0x1b, 0x7b, 0xbd, 0xbe, 0x0c, 0xf4, 0xbe, 0x0e, 0xf4, - 0xfe, 0x53, 0x1d, 0xe8, 0x83, 0xfa, 0x57, 0x2f, 0xb7, 0x57, 0xbe, 0xf8, 0xc3, 0xb6, 0x61, 0xa3, - 0x06, 0xb9, 0x29, 0x1c, 0x82, 0x7a, 0xbe, 0xe3, 0xb9, 0x6a, 0x9f, 0x1a, 0xce, 0x8f, 0x5c, 0x72, - 0x00, 0x9d, 0x51, 0xe0, 0xc7, 0xcc, 0x8f, 0xe7, 0xb1, 0x13, 0xd2, 0x88, 0xce, 0x62, 0x15, 0x6b, - 0xfa, 0xf3, 0x3f, 0xd4, 0xec, 0x63, 0xe4, 0xda, 0xed, 0x51, 0x9e, 0x40, 0x3e, 0x04, 0x38, 0xa3, - 0x53, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x6e, 0xf5, 0xd6, 0x6a, 0x46, 0xf9, 0x44, 0x33, 0x9e, 0x85, - 0x2e, 0xe5, 0x6c, 0x50, 0x15, 0x27, 0xb3, 0x33, 0xf2, 0xe4, 0x4d, 0x68, 0xd3, 0x30, 0x74, 0x62, - 0x4e, 0x39, 0x73, 0x86, 0xe7, 0x9c, 0xc5, 0x18, 0x8f, 0x1b, 0x76, 0x93, 0x86, 0xe1, 0x13, 0x41, - 0x1d, 0x08, 0xa2, 0xe5, 0x26, 0x5f, 0x13, 0x43, 0x85, 0x10, 0xa8, 0xba, 0x94, 0x53, 0xb4, 0xc6, - 0x86, 0x8d, 0x63, 0x41, 0x0b, 0x29, 0x9f, 0xa8, 0x3b, 0xe2, 0x98, 0x5c, 0x87, 0xf5, 0x09, 0xf3, - 0xc6, 0x13, 0x8e, 0xd7, 0x5a, 0xb5, 0xd5, 0x4c, 0x18, 0x3e, 0x8c, 0x82, 0x33, 0x86, 0xd9, 0xa2, - 0x6e, 0xcb, 0x89, 0xf5, 0x17, 0x03, 0xae, 0x5c, 0x0a, 0x2f, 0xb1, 0xee, 0x84, 0xc6, 0x13, 0xbd, - 0x97, 0x18, 0x93, 0xb7, 0xc5, 0xba, 0xd4, 0x65, 0x91, 0xca, 0x62, 0x4d, 0x75, 0xe3, 0x43, 0x24, - 0xaa, 0x8b, 0x2a, 0x11, 0xf2, 0x18, 0x3a, 0x53, 0x1a, 0x73, 0x47, 0x46, 0x81, 0x83, 0x59, 0x6a, - 0x35, 0x17, 0x99, 0x9f, 0x50, 0x1d, 0x2d, 0xc2, 0x39, 0x95, 0x7a, 0x6b, 0x9a, 0xa3, 0x92, 0x43, - 0xd8, 0x1c, 0x9e, 0xbf, 0xa0, 0x3e, 0xf7, 0x7c, 0xe6, 0x5c, 0xb2, 0x79, 0x5b, 0x2d, 0xf5, 0xf8, - 0xcc, 0x73, 0x99, 0x3f, 0xd2, 0xc6, 0xbe, 0x9a, 0xa8, 0x24, 0x1f, 0x23, 0xb6, 0x6e, 0x41, 0x2b, - 0x9f, 0x0b, 0x48, 0x0b, 0x2a, 0x7c, 0xa1, 0x6e, 0x58, 0xe1, 0x0b, 0xcb, 0x4a, 0x3c, 0x30, 0x09, - 0xc8, 0x4b, 0x32, 0x3b, 0xd0, 0x2e, 0x24, 0x87, 0x8c, 
0xb9, 0x8d, 0xac, 0xb9, 0xad, 0x36, 0x34, - 0x73, 0x39, 0xc1, 0xfa, 0x7c, 0x0d, 0xea, 0x36, 0x8b, 0x43, 0xe1, 0x4c, 0x64, 0x1f, 0x4c, 0xb6, - 0x18, 0x31, 0x99, 0x8e, 0x8d, 0x42, 0xb2, 0x93, 0x32, 0x8f, 0x35, 0x5f, 0xa4, 0x85, 0x44, 0x98, - 0xec, 0xe4, 0xa0, 0xe4, 0x6a, 0x51, 0x29, 0x8b, 0x25, 0x77, 0xf3, 0x58, 0xb2, 0x59, 0x90, 0x2d, - 0x80, 0xc9, 0x4e, 0x0e, 0x4c, 0x8a, 0x0b, 0xe7, 0xd0, 0xe4, 0x7e, 0x09, 0x9a, 0x14, 0x8f, 0xbf, - 0x04, 0x4e, 0xee, 0x97, 0xc0, 0x49, 0xf7, 0xd2, 0x5e, 0xa5, 0x78, 0x72, 0x37, 0x8f, 0x27, 0xc5, - 0xeb, 0x14, 0x00, 0xe5, 0xc3, 0x32, 0x40, 0xb9, 0x59, 0xd0, 0x59, 0x8a, 0x28, 0xef, 0x5f, 0x42, - 0x94, 0xeb, 0x05, 0xd5, 0x12, 0x48, 0xb9, 0x9f, 0xcb, 0xf5, 0x50, 0x7a, 0xb7, 0xf2, 0x64, 0x4f, - 0xbe, 0x77, 0x19, 0x8d, 0x6e, 0x14, 0x3f, 0x6d, 0x19, 0x1c, 0xed, 0x16, 0xe0, 0xe8, 0x5a, 0xf1, - 0x94, 0x05, 0x3c, 0x4a, 0x51, 0x65, 0x47, 0xc4, 0x7d, 0xc1, 0xd3, 0x44, 0x8e, 0x60, 0x51, 0x14, - 0x44, 0x2a, 0x61, 0xcb, 0x89, 0x75, 0x47, 0x64, 0xa2, 0xd4, 0xbf, 0x5e, 0x81, 0x40, 0xe8, 0xf4, - 0x19, 0xef, 0xb2, 0xbe, 0x34, 0x52, 0x5d, 0x8c, 0xe8, 0x6c, 0x16, 0x33, 0x55, 0x16, 0xcb, 0x00, - 0x53, 0x25, 0x0f, 0x4c, 0xdb, 0xd0, 0x10, 0xb9, 0xb2, 0x80, 0x39, 0x34, 0xd4, 0x98, 0x43, 0xbe, - 0x03, 0x57, 0x30, 0xcf, 0x48, 0xf8, 0x52, 0x81, 0x58, 0xc5, 0x40, 0x6c, 0x0b, 0x86, 0xb4, 0x98, - 0x4c, 0x80, 0xef, 0xc0, 0xd5, 0x8c, 0xac, 0x58, 0x17, 0x73, 0x9c, 0x4c, 0xbe, 0x9d, 0x44, 0xfa, - 0x20, 0x0c, 0x0f, 0x69, 0x3c, 0xb1, 0x7e, 0x94, 0x1a, 0x28, 0xc5, 0x33, 0x02, 0xd5, 0x51, 0xe0, - 0xca, 0x7b, 0x37, 0x6d, 0x1c, 0x0b, 0x8c, 0x9b, 0x06, 0x63, 0x3c, 0x9c, 0x69, 0x8b, 0xa1, 0x90, - 0x4a, 0x42, 0xc9, 0x94, 0x31, 0x63, 0xfd, 0xd2, 0x48, 0xd7, 0x4b, 0x21, 0xae, 0x0c, 0x8d, 0x8c, - 0xff, 0x04, 0x8d, 0x2a, 0xaf, 0x87, 0x46, 0xd6, 0x85, 0x91, 0x7e, 0xb2, 0x04, 0x67, 0xbe, 0xd9, - 0x15, 0x85, 0xf7, 0x78, 0xbe, 0xcb, 0x16, 0x68, 0xd2, 0x55, 0x5b, 0x4e, 0x74, 0x09, 0xb0, 0x8e, - 0x66, 0xce, 0x97, 0x00, 0x35, 0xa4, 0xc9, 0x09, 0xb9, 0x8d, 0xf8, 0x14, 0x3c, 0x57, 0xa1, 0xda, - 0xec, 0xab, 0x6a, 0xfa, 0x58, 0x10, 0x6d, 0xc9, 0xcb, 0x64, 0x5b, 0x33, 0x07, 0x6e, 0x6f, 0x80, - 0x29, 0x0e, 0x1a, 0x87, 0x74, 0xc4, 0x30, 0xf2, 0x4c, 0x3b, 0x25, 0x58, 0xc7, 0x40, 0x2e, 0x47, - 0x3c, 0xf9, 0x00, 0xaa, 0x9c, 0x8e, 0x85, 0xbd, 0x85, 0xc9, 0x5a, 0x7d, 0xd9, 0x00, 0xf4, 0x3f, - 0x3e, 0x39, 0xa6, 0x5e, 0x34, 0xb8, 0x2e, 0x4c, 0xf5, 0xb7, 0x97, 0xdb, 0x2d, 0x21, 0x73, 0x37, - 0x98, 0x79, 0x9c, 0xcd, 0x42, 0x7e, 0x6e, 0xa3, 0x8e, 0xf5, 0x77, 0x43, 0x20, 0x41, 0x2e, 0x13, - 0x94, 0x1a, 0x4e, 0xbb, 0x7b, 0x25, 0x03, 0xda, 0x5f, 0xcf, 0x98, 0xff, 0x0f, 0x30, 0xa6, 0xb1, - 0xf3, 0x19, 0xf5, 0x39, 0x73, 0x95, 0x45, 0xcd, 0x31, 0x8d, 0x7f, 0x86, 0x04, 0x51, 0xe1, 0x08, - 0xf6, 0x3c, 0x66, 0x2e, 0x9a, 0x76, 0xd5, 0xae, 0x8d, 0x69, 0xfc, 0x2c, 0x66, 0x6e, 0x72, 0xaf, - 0xda, 0xeb, 0xdf, 0x2b, 0x6f, 0xc7, 0x7a, 0xd1, 0x8e, 0xff, 0xc8, 0xf8, 0x70, 0x0a, 0x92, 0xff, - 0xfb, 0xf7, 0xfe, 0xab, 0x21, 0x6a, 0x83, 0x7c, 0x1a, 0x26, 0x47, 0x70, 0x25, 0x89, 0x23, 0x67, - 0x8e, 0xf1, 0xa5, 0x7d, 0xe9, 0xd5, 0xe1, 0xd7, 0x39, 0xcb, 0x93, 0x63, 0xf2, 0x63, 0xb8, 0x51, - 0xc8, 0x02, 0xc9, 0x82, 0x95, 0x57, 0x26, 0x83, 0x6b, 0xf9, 0x64, 0xa0, 0xd7, 0xd3, 0x96, 0x58, - 0xfd, 0x06, 0x9e, 0xfd, 0x2d, 0x51, 0x28, 0x65, 0xc1, 0xa3, 0xec, 0x5b, 0x5a, 0xbf, 0x36, 0xa0, - 0x5d, 0x38, 0x0c, 0xb9, 0x07, 0x20, 0x53, 0x6b, 0xec, 0xbd, 0x60, 0x85, 0x2c, 0x86, 0x26, 0x7b, - 0xe2, 0xbd, 0x60, 0xea, 0xe0, 0xe6, 0x50, 0x13, 0xc8, 0x7b, 0x50, 0x67, 0xaa, 0x80, 0x53, 0xb7, - 0xbd, 0x56, 0xa8, 0xeb, 0x94, 0x4e, 0x22, 0x46, 0xbe, 0x0b, 0x66, 0x62, 0xc3, 
0x42, 0xf1, 0x9e, - 0x98, 0x5c, 0x6f, 0x94, 0x08, 0x5a, 0x1f, 0x41, 0xbb, 0x70, 0x0c, 0xf2, 0x7f, 0x60, 0xce, 0xe8, - 0x42, 0x55, 0xe1, 0xb2, 0x7e, 0xab, 0xcf, 0xe8, 0x02, 0x0b, 0x70, 0x72, 0x03, 0x6a, 0x82, 0x39, - 0xa6, 0xf2, 0x2b, 0xac, 0xda, 0xeb, 0x33, 0xba, 0xf8, 0x88, 0xc6, 0xd6, 0x0e, 0xb4, 0xf2, 0x47, - 0xd3, 0xa2, 0x1a, 0x11, 0xa5, 0xe8, 0xc1, 0x98, 0x59, 0xf7, 0xa0, 0x5d, 0x38, 0x11, 0xb1, 0xa0, - 0x19, 0xce, 0x87, 0xce, 0x29, 0x3b, 0x77, 0xf0, 0xc8, 0xe8, 0x33, 0xa6, 0xdd, 0x08, 0xe7, 0xc3, - 0x8f, 0xd9, 0xf9, 0x53, 0x41, 0xb2, 0x9e, 0x40, 0x2b, 0x5f, 0x1f, 0x8b, 0x9c, 0x19, 0x05, 0x73, - 0xdf, 0xc5, 0xf5, 0xd7, 0x6c, 0x39, 0x11, 0x2d, 0xf6, 0x59, 0x20, 0xdd, 0x24, 0x5b, 0x10, 0x9f, - 0x04, 0x9c, 0x65, 0xaa, 0x6a, 0x29, 0x63, 0xfd, 0x62, 0x0d, 0xd6, 0x65, 0xb1, 0x4e, 0xfa, 0xf9, - 0x56, 0x50, 0xf8, 0x88, 0xd2, 0x94, 0x54, 0xa5, 0x98, 0xe0, 0xf0, 0x9b, 0xc5, 0x7e, 0x6a, 0xd0, - 0xb8, 0x78, 0xb9, 0x5d, 0x43, 0x0c, 0x3b, 0x7a, 0x94, 0x36, 0x57, 0xcb, 0x7a, 0x0f, 0xdd, 0xc9, - 0x55, 0x5f, 0xbb, 0x93, 0xbb, 0x01, 0x35, 0x7f, 0x3e, 0x73, 0xf8, 0x22, 0x56, 0xb9, 0x60, 0xdd, - 0x9f, 0xcf, 0x9e, 0x2e, 0xf0, 0xd3, 0xf1, 0x80, 0xd3, 0x29, 0xb2, 0x64, 0x26, 0xa8, 0x23, 0x41, - 0x30, 0xf7, 0xa1, 0x99, 0x81, 0x7a, 0xcf, 0x55, 0x25, 0x63, 0x2b, 0xeb, 0x8d, 0x47, 0x8f, 0xd4, - 0x2d, 0x1b, 0x09, 0xf4, 0x1f, 0xb9, 0xe4, 0x4e, 0xbe, 0x71, 0xc1, 0x0a, 0xa1, 0x8e, 0x8e, 0x9f, - 0xe9, 0x4d, 0x44, 0x7d, 0x20, 0x0e, 0x20, 0x42, 0x41, 0x8a, 0x98, 0x28, 0x52, 0x17, 0x04, 0x64, - 0xbe, 0x05, 0xed, 0x14, 0x64, 0xa5, 0x08, 0xc8, 0x55, 0x52, 0x32, 0x0a, 0xbe, 0x0b, 0x9b, 0x3e, - 0x5b, 0x70, 0xa7, 0x28, 0xdd, 0x40, 0x69, 0x22, 0x78, 0x27, 0x79, 0x8d, 0x6f, 0x43, 0x2b, 0x4d, - 0x16, 0x28, 0xbb, 0x21, 0xdb, 0xc7, 0x84, 0x8a, 0x62, 0x37, 0xa1, 0x9e, 0x94, 0x38, 0x4d, 0x14, - 0xa8, 0x51, 0x59, 0xd9, 0x24, 0x45, 0x53, 0xc4, 0xe2, 0xf9, 0x94, 0xab, 0x45, 0x5a, 0x28, 0x83, - 0x45, 0x93, 0x2d, 0xe9, 0x28, 0x7b, 0x1b, 0x9a, 0x3a, 0xec, 0xa4, 0x5c, 0x1b, 0xe5, 0x36, 0x34, - 0x11, 0x85, 0x76, 0xa0, 0x13, 0x46, 0x41, 0x18, 0xc4, 0x2c, 0x72, 0xa8, 0xeb, 0x46, 0x2c, 0x8e, - 0xbb, 0x1d, 0xb9, 0x9e, 0xa6, 0x1f, 0x48, 0xb2, 0xf5, 0x1e, 0xd4, 0x74, 0xed, 0xb6, 0x09, 0x6b, - 0x68, 0x75, 0x74, 0xc1, 0xaa, 0x2d, 0x27, 0x02, 0x25, 0x0e, 0xc2, 0x50, 0xbd, 0x40, 0x88, 0xa1, - 0xf5, 0x73, 0xa8, 0xa9, 0x0f, 0x56, 0xda, 0x97, 0x7e, 0x1f, 0x36, 0x42, 0x1a, 0x89, 0x6b, 0x64, - 0xbb, 0x53, 0xdd, 0x1d, 0x1c, 0xd3, 0x88, 0x3f, 0x61, 0x3c, 0xd7, 0xa4, 0x36, 0x50, 0x5e, 0x92, - 0xac, 0xfb, 0xd0, 0xcc, 0xc9, 0x88, 0x63, 0xa1, 0x1f, 0xe9, 0x48, 0xc3, 0x49, 0xb2, 0x73, 0x25, - 0xdd, 0xd9, 0x7a, 0x00, 0x66, 0xf2, 0x6d, 0x44, 0x11, 0xab, 0xaf, 0x6e, 0x28, 0x73, 0xcb, 0x29, - 0x36, 0xde, 0xc1, 0x67, 0x2c, 0x52, 0x31, 0x21, 0x27, 0xd6, 0xb3, 0x4c, 0x66, 0x90, 0x79, 0x9b, - 0xdc, 0x85, 0x9a, 0xca, 0x0c, 0x2a, 0x2a, 0x75, 0x8b, 0x7d, 0x8c, 0xa9, 0x41, 0xb7, 0xd8, 0x32, - 0x51, 0xa4, 0xcb, 0x56, 0xb2, 0xcb, 0x4e, 0xa1, 0xae, 0xa3, 0x3f, 0x9f, 0x26, 0xe5, 0x8a, 0x9d, - 0x62, 0x9a, 0x54, 0x8b, 0xa6, 0x82, 0xc2, 0x3b, 0x62, 0x6f, 0xec, 0x33, 0xd7, 0x49, 0x43, 0x08, - 0xf7, 0xa8, 0xdb, 0x6d, 0xc9, 0xf8, 0x44, 0xc7, 0x8b, 0xf5, 0x2e, 0xac, 0xcb, 0xb3, 0x09, 0xfb, - 0x88, 0x95, 0x75, 0x5d, 0x2f, 0xc6, 0xa5, 0xc0, 0xf1, 0x7b, 0x03, 0xea, 0x3a, 0x79, 0x96, 0x2a, - 0xe5, 0x0e, 0x5d, 0xf9, 0xba, 0x87, 0xfe, 0xef, 0x27, 0x9e, 0xbb, 0x40, 0x64, 0x7e, 0x39, 0x0b, - 0xb8, 0xe7, 0x8f, 0x1d, 0x69, 0x6b, 0x99, 0x83, 0x3a, 0xc8, 0x39, 0x41, 0xc6, 0xb1, 0xa0, 0xef, - 0x7d, 0xbe, 0x06, 0xed, 0x83, 0xc1, 0xc3, 0xa3, 0x83, 0x30, 0x9c, 0x7a, 0x23, 0x8a, 0xbd, 0xc2, - 0x2e, 
0x54, 0xb1, 0x5d, 0x2a, 0x79, 0xee, 0xed, 0x95, 0xf5, 0xed, 0x64, 0x0f, 0xd6, 0xb0, 0x6b, - 0x22, 0x65, 0xaf, 0xbe, 0xbd, 0xd2, 0xf6, 0x5d, 0x6c, 0x22, 0xfb, 0xaa, 0xcb, 0x8f, 0xbf, 0xbd, - 0xb2, 0x1e, 0x9e, 0xfc, 0x00, 0xcc, 0xb4, 0x9d, 0x59, 0xf6, 0x04, 0xdc, 0x5b, 0xda, 0xcd, 0x0b, - 0xfd, 0xb4, 0xf4, 0x5b, 0xf6, 0x92, 0xd9, 0x5b, 0xda, 0xf6, 0x92, 0x7d, 0xa8, 0xe9, 0x82, 0xb9, - 0xfc, 0x91, 0xb6, 0xb7, 0xa4, 0xd3, 0x16, 0xe6, 0x91, 0x1d, 0x4a, 0xd9, 0x4b, 0x72, 0xaf, 0xf4, - 0x39, 0x80, 0xdc, 0x83, 0x75, 0x55, 0xc5, 0x94, 0x3e, 0xd4, 0xf6, 0xca, 0xfb, 0x65, 0x71, 0xc9, - 0xb4, 0x47, 0x5b, 0xf6, 0xda, 0xdd, 0x5b, 0xfa, 0x6e, 0x41, 0x0e, 0x00, 0x32, 0x8d, 0xc6, 0xd2, - 0x67, 0xec, 0xde, 0xf2, 0xf7, 0x08, 0xf2, 0x00, 0xea, 0xe9, 0x1b, 0x53, 0xf9, 0xc3, 0x74, 0x6f, - 0xd9, 0x13, 0xc1, 0xe0, 0x8d, 0x7f, 0xfe, 0x69, 0xcb, 0xf8, 0xd5, 0xc5, 0x96, 0xf1, 0xe5, 0xc5, - 0x96, 0xf1, 0xd5, 0xc5, 0x96, 0xf1, 0xbb, 0x8b, 0x2d, 0xe3, 0x8f, 0x17, 0x5b, 0xc6, 0x6f, 0xfe, - 0xbc, 0x65, 0x0c, 0xd7, 0xd1, 0xfd, 0xdf, 0xff, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x8d, - 0xcb, 0x04, 0x88, 0x19, 0x00, 0x00, + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_765aa6431c012acb) +} + +var fileDescriptor_types_765aa6431c012acb = []byte{ + // 2388 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0xcb, 0x6f, 0x1c, 0xc7, + 0xd1, 0xe7, 0xec, 0x7b, 0x6b, 0xb9, 0x0f, 0xb5, 0x28, 0x69, 0xb5, 0xd6, 0x47, 0x0a, 0xa3, 0x2f, + 0xb2, 0x68, 0xc9, 0x4b, 0x9b, 0x8e, 0x02, 0xca, 0x72, 0x0c, 0x70, 0x25, 0xc5, 0x24, 0xac, 0x38, + 0xcc, 0x48, 0x62, 0x80, 0x20, 0xc0, 0xa0, 0x77, 0xa7, 0xb5, 0x3b, 0xe0, 0xee, 0xcc, 0x78, 0xa6, + 0x97, 0x5a, 0xea, 0x98, 0xb3, 0x0f, 0x3e, 0xe4, 0x8f, 0xc8, 0x35, 0x37, 0x1f, 0x73, 0x0a, 0x7c, + 0xcc, 0x21, 0x67, 0x25, 0x61, 0x90, 0x4b, 0x80, 0x9c, 0x72, 0x48, 0x72, 0x08, 0x12, 0x74, 0x75, + 0xcf, 0x93, 0xb3, 0xb2, 0xa5, 0xe4, 0x60, 0xc0, 0x17, 0x72, 0xba, 0xeb, 0x57, 0xd5, 0x5d, 0xd5, + 0xf5, 0xe8, 0xae, 0x85, 0x8b, 0x74, 0x38, 0xb2, 0xb7, 0xf8, 0x89, 0xc7, 0x02, 0xf9, 0xb7, 0xef, + 0xf9, 0x2e, 0x77, 0x49, 0x19, 0x07, 0xbd, 0xb7, 0xc7, 0x36, 0x9f, 0xcc, 0x87, 0xfd, 0x91, 0x3b, + 0xdb, 0x1a, 0xbb, 0x63, 0x77, 0x0b, 0xa9, 0xc3, 0xf9, 0x53, 0x1c, 0xe1, 0x00, 0xbf, 0x24, 0x57, + 0x6f, 0x63, 0xec, 0xba, 0xe3, 0x29, 0x8b, 0x51, 0xdc, 0x9e, 0xb1, 0x80, 0xd3, 0x99, 0xa7, 0x00, + 0x3b, 0x09, 0x79, 0x9c, 0x39, 0x16, 0xf3, 0x67, 0xb6, 0xc3, 0x93, 0x9f, 0x53, 0x7b, 0x18, 0x6c, + 0x8d, 0xdc, 0xd9, 0xcc, 0x75, 0x92, 0x1b, 0xea, 0xdd, 0xfd, 0x4a, 0xce, 0x91, 0x7f, 0xe2, 0x71, + 0x77, 0x6b, 0xc6, 0xfc, 0xa3, 0x29, 0x53, 0xff, 0x24, 0xb3, 0xfe, 0x9b, 0x12, 0x54, 0x0d, 0xf6, + 0xe9, 0x9c, 0x05, 0x9c, 0xdc, 0x80, 0x12, 0x1b, 0x4d, 0xdc, 0x6e, 0xe1, 0xaa, 0x76, 0xa3, 0xb1, + 0x4d, 0xfa, 0x72, 0x11, 0x45, 0x7d, 0x30, 0x9a, 0xb8, 0x7b, 0x2b, 0x06, 0x22, 0xc8, 0x4d, 0x28, + 0x3f, 0x9d, 0xce, 0x83, 0x49, 0xb7, 0x88, 0xd0, 0xf3, 0x69, 0xe8, 0x0f, 0x04, 0x69, 0x6f, 0xc5, + 0x90, 0x18, 0x21, 0xd6, 0x76, 0x9e, 0xba, 0xdd, 0x52, 0x9e, 0xd8, 0x7d, 0xe7, 0x29, 0x8a, 0x15, + 0x08, 0xb2, 0x03, 0x10, 0x30, 0x6e, 0xba, 0x1e, 0xb7, 0x5d, 0xa7, 0x5b, 0x46, 0xfc, 0xa5, 0x34, + 0xfe, 0x11, 0xe3, 0x3f, 0x42, 0xf2, 0xde, 0x8a, 0x51, 0x0f, 0xc2, 0x81, 0xe0, 0xb4, 0x1d, 0x9b, + 0x9b, 0xa3, 0x09, 0xb5, 0x9d, 0x6e, 0x25, 0x8f, 0x73, 0xdf, 0xb1, 0xf9, 0x3d, 0x41, 0x16, 0x9c, + 0x76, 0x38, 0x10, 0xaa, 0x7c, 0x3a, 0x67, 0xfe, 0x49, 0xb7, 0x9a, 0xa7, 0xca, 0x8f, 0x05, 0x49, + 0xa8, 0x82, 0x18, 0x72, 0x17, 0x1a, 0x43, 0x36, 0xb6, 0x1d, 0x73, 0x38, 0x75, 0x47, 
0x47, 0xdd, + 0x1a, 0xb2, 0x74, 0xd3, 0x2c, 0x03, 0x01, 0x18, 0x08, 0xfa, 0xde, 0x8a, 0x01, 0xc3, 0x68, 0x44, + 0xb6, 0xa1, 0x36, 0x9a, 0xb0, 0xd1, 0x91, 0xc9, 0x17, 0xdd, 0x3a, 0x72, 0x5e, 0x48, 0x73, 0xde, + 0x13, 0xd4, 0xc7, 0x8b, 0xbd, 0x15, 0xa3, 0x3a, 0x92, 0x9f, 0xe4, 0x36, 0xd4, 0x99, 0x63, 0xa9, + 0xe5, 0x1a, 0xc8, 0x74, 0x31, 0x73, 0x2e, 0x8e, 0x15, 0x2e, 0x56, 0x63, 0xea, 0x9b, 0xf4, 0xa1, + 0x22, 0x1c, 0xc5, 0xe6, 0xdd, 0x55, 0xe4, 0x59, 0xcb, 0x2c, 0x84, 0xb4, 0xbd, 0x15, 0x43, 0xa1, + 0x84, 0xf9, 0x2c, 0x36, 0xb5, 0x8f, 0x99, 0x2f, 0x36, 0x77, 0x3e, 0xcf, 0x7c, 0xf7, 0x25, 0x1d, + 0xb7, 0x57, 0xb7, 0xc2, 0xc1, 0xa0, 0x0a, 0xe5, 0x63, 0x3a, 0x9d, 0x33, 0xfd, 0x4d, 0x68, 0x24, + 0x3c, 0x85, 0x74, 0xa1, 0x3a, 0x63, 0x41, 0x40, 0xc7, 0xac, 0xab, 0x5d, 0xd5, 0x6e, 0xd4, 0x8d, + 0x70, 0xa8, 0xb7, 0x60, 0x35, 0xe9, 0x27, 0xfa, 0x2c, 0x62, 0x14, 0xbe, 0x20, 0x18, 0x8f, 0x99, + 0x1f, 0x08, 0x07, 0x50, 0x8c, 0x6a, 0x48, 0xae, 0x41, 0x13, 0xed, 0x60, 0x86, 0x74, 0xe1, 0xa7, + 0x25, 0x63, 0x15, 0x27, 0x0f, 0x15, 0x68, 0x03, 0x1a, 0xde, 0xb6, 0x17, 0x41, 0x8a, 0x08, 0x01, + 0x6f, 0xdb, 0x53, 0x00, 0xfd, 0x7d, 0xe8, 0x64, 0x5d, 0x89, 0x74, 0xa0, 0x78, 0xc4, 0x4e, 0xd4, + 0x7a, 0xe2, 0x93, 0xac, 0x29, 0xb5, 0x70, 0x8d, 0xba, 0xa1, 0x74, 0xfc, 0xbc, 0x10, 0x31, 0x47, + 0xde, 0x44, 0x76, 0xa0, 0x24, 0x62, 0x19, 0xb9, 0x1b, 0xdb, 0xbd, 0xbe, 0x0c, 0xf4, 0x7e, 0x18, + 0xe8, 0xfd, 0xc7, 0x61, 0xa0, 0x0f, 0x6a, 0x5f, 0xbe, 0xd8, 0x58, 0xf9, 0xfc, 0xf7, 0x1b, 0x9a, + 0x81, 0x1c, 0xe4, 0xb2, 0x70, 0x08, 0x6a, 0x3b, 0xa6, 0x6d, 0xa9, 0x75, 0xaa, 0x38, 0xde, 0xb7, + 0xc8, 0x2e, 0x74, 0x46, 0xae, 0x13, 0x30, 0x27, 0x98, 0x07, 0xa6, 0x47, 0x7d, 0x3a, 0x0b, 0x54, + 0xac, 0x85, 0xc7, 0x7f, 0x2f, 0x24, 0x1f, 0x20, 0xd5, 0x68, 0x8f, 0xd2, 0x13, 0xe4, 0x03, 0x80, + 0x63, 0x3a, 0xb5, 0x2d, 0xca, 0x5d, 0x3f, 0xe8, 0x96, 0xae, 0x16, 0x13, 0xcc, 0x87, 0x21, 0xe1, + 0x89, 0x67, 0x51, 0xce, 0x06, 0x25, 0xb1, 0x33, 0x23, 0x81, 0x27, 0xd7, 0xa1, 0x4d, 0x3d, 0xcf, + 0x0c, 0x38, 0xe5, 0xcc, 0x1c, 0x9e, 0x70, 0x16, 0x60, 0x3c, 0xae, 0x1a, 0x4d, 0xea, 0x79, 0x8f, + 0xc4, 0xec, 0x40, 0x4c, 0xea, 0x56, 0x74, 0x9a, 0x18, 0x2a, 0x84, 0x40, 0xc9, 0xa2, 0x9c, 0xa2, + 0x35, 0x56, 0x0d, 0xfc, 0x16, 0x73, 0x1e, 0xe5, 0x13, 0xa5, 0x23, 0x7e, 0x93, 0x8b, 0x50, 0x99, + 0x30, 0x7b, 0x3c, 0xe1, 0xa8, 0x56, 0xd1, 0x50, 0x23, 0x61, 0x78, 0xcf, 0x77, 0x8f, 0x19, 0x66, + 0x8b, 0x9a, 0x21, 0x07, 0xfa, 0x9f, 0x35, 0x38, 0x77, 0x26, 0xbc, 0x84, 0xdc, 0x09, 0x0d, 0x26, + 0xe1, 0x5a, 0xe2, 0x9b, 0xdc, 0x14, 0x72, 0xa9, 0xc5, 0x7c, 0x95, 0xc5, 0x9a, 0x4a, 0xe3, 0x3d, + 0x9c, 0x54, 0x8a, 0x2a, 0x08, 0x79, 0x00, 0x9d, 0x29, 0x0d, 0xb8, 0x29, 0xa3, 0xc0, 0xc4, 0x2c, + 0x55, 0x4c, 0x45, 0xe6, 0x43, 0x1a, 0x46, 0x8b, 0x70, 0x4e, 0xc5, 0xde, 0x9a, 0xa6, 0x66, 0xc9, + 0x1e, 0xac, 0x0d, 0x4f, 0x9e, 0x53, 0x87, 0xdb, 0x0e, 0x33, 0xcf, 0xd8, 0xbc, 0xad, 0x44, 0x3d, + 0x38, 0xb6, 0x2d, 0xe6, 0x8c, 0x42, 0x63, 0x9f, 0x8f, 0x58, 0xa2, 0xc3, 0x08, 0xf4, 0x3d, 0x68, + 0xa5, 0x73, 0x01, 0x69, 0x41, 0x81, 0x2f, 0x94, 0x86, 0x05, 0xbe, 0x20, 0xd7, 0xa1, 0x24, 0xc4, + 0xa1, 0x76, 0xad, 0x28, 0x99, 0x2a, 0xf4, 0xe3, 0x13, 0x8f, 0x19, 0x48, 0xd7, 0xf5, 0xc8, 0x53, + 0xa3, 0xc0, 0xcd, 0xca, 0xd2, 0x37, 0xa1, 0x9d, 0x49, 0x22, 0x89, 0x63, 0xd1, 0x92, 0xc7, 0xa2, + 0xb7, 0xa1, 0x99, 0xca, 0x1d, 0xfa, 0x67, 0x65, 0xa8, 0x19, 0x2c, 0xf0, 0x84, 0xd3, 0x91, 0x1d, + 0xa8, 0xb3, 0xc5, 0x88, 0xc9, 0xb4, 0xad, 0x65, 0x92, 0xa2, 0xc4, 0x3c, 0x08, 0xe9, 0x22, 0x7d, + 0x44, 0x60, 0xb2, 0x99, 0x2a, 0x39, 0xe7, 0xb3, 0x4c, 0xc9, 0x9a, 0x73, 0x2b, 0x5d, 0x73, 0xd6, + 0x32, 0xd8, 
0x4c, 0xd1, 0xd9, 0x4c, 0x15, 0x9d, 0xac, 0xe0, 0x54, 0xd5, 0xb9, 0x93, 0x53, 0x75, + 0xb2, 0xdb, 0x5f, 0x52, 0x76, 0xee, 0xe4, 0x94, 0x9d, 0xee, 0x99, 0xb5, 0x72, 0xeb, 0xce, 0xad, + 0x74, 0xdd, 0xc9, 0xaa, 0x93, 0x29, 0x3c, 0x1f, 0xe4, 0x15, 0x9e, 0xcb, 0x19, 0x9e, 0xa5, 0x95, + 0xe7, 0xbd, 0x33, 0x95, 0xe7, 0x62, 0x86, 0x35, 0xa7, 0xf4, 0xdc, 0x49, 0xd5, 0x04, 0xc8, 0xd5, + 0x2d, 0xbf, 0x28, 0x90, 0xef, 0x9d, 0xad, 0x5a, 0x97, 0xb2, 0x47, 0x9b, 0x57, 0xb6, 0xb6, 0x32, + 0x65, 0xeb, 0x42, 0x76, 0x97, 0x99, 0xba, 0x15, 0x57, 0x9f, 0x4d, 0x91, 0x1f, 0x32, 0x9e, 0x26, + 0x72, 0x09, 0xf3, 0x7d, 0xd7, 0x57, 0x89, 0x5d, 0x0e, 0xf4, 0x1b, 0x22, 0x63, 0xc5, 0xfe, 0xf5, + 0x92, 0x4a, 0x85, 0x4e, 0x9f, 0xf0, 0x2e, 0xfd, 0x0b, 0x2d, 0xe6, 0xc5, 0xc8, 0x4f, 0x66, 0xbb, + 0xba, 0xca, 0x76, 0x89, 0x02, 0x56, 0x48, 0x17, 0xb0, 0x0d, 0x68, 0x88, 0x9c, 0x9a, 0xa9, 0x4d, + 0xd4, 0x0b, 0x6b, 0x13, 0x79, 0x0b, 0xce, 0x61, 0x3e, 0x92, 0x65, 0x4e, 0x05, 0x62, 0x09, 0x03, + 0xb1, 0x2d, 0x08, 0xd2, 0x62, 0x32, 0x51, 0xbe, 0x0d, 0xe7, 0x13, 0x58, 0x21, 0x17, 0x73, 0xa1, + 0x4c, 0xd2, 0x9d, 0x08, 0xbd, 0xeb, 0x79, 0x7b, 0x34, 0x98, 0xe8, 0x3f, 0x8c, 0x0d, 0x14, 0xd7, + 0x3d, 0x02, 0xa5, 0x91, 0x6b, 0x49, 0xbd, 0x9b, 0x06, 0x7e, 0x8b, 0x5a, 0x38, 0x75, 0xc7, 0xb8, + 0xb9, 0xba, 0x21, 0x3e, 0x05, 0x2a, 0x0a, 0xa5, 0xba, 0x8c, 0x19, 0xfd, 0x17, 0x5a, 0x2c, 0x2f, + 0x2e, 0x85, 0x79, 0x55, 0x4b, 0xfb, 0x6f, 0xaa, 0x56, 0xe1, 0xd5, 0xaa, 0x96, 0x7e, 0xaa, 0xc5, + 0x47, 0x16, 0xd5, 0xa3, 0xd7, 0x53, 0x51, 0x78, 0x8f, 0xed, 0x58, 0x6c, 0x81, 0x26, 0x2d, 0x1a, + 0x72, 0x10, 0x5e, 0x15, 0x2a, 0x68, 0xe6, 0xf4, 0x55, 0xa1, 0x8a, 0x73, 0x72, 0x40, 0xae, 0x61, + 0x1d, 0x73, 0x9f, 0xaa, 0x50, 0x6d, 0xf6, 0xd5, 0xad, 0xfb, 0x40, 0x4c, 0x1a, 0x92, 0x96, 0xc8, + 0xb6, 0xf5, 0x54, 0x11, 0xbc, 0x02, 0x75, 0xb1, 0xd1, 0xc0, 0xa3, 0x23, 0x86, 0x91, 0x57, 0x37, + 0xe2, 0x09, 0xfd, 0x31, 0x90, 0xb3, 0x11, 0x4f, 0x3e, 0x84, 0x0a, 0x3b, 0x66, 0x0e, 0x17, 0x16, + 0x17, 0x46, 0x5b, 0x8d, 0xca, 0x0e, 0x73, 0xf8, 0xa0, 0x2b, 0x4c, 0xf5, 0x97, 0x17, 0x1b, 0x1d, + 0x89, 0xb9, 0xe5, 0xce, 0x6c, 0xce, 0x66, 0x1e, 0x3f, 0x31, 0x14, 0x97, 0xfe, 0x53, 0xb8, 0x72, + 0x56, 0xea, 0x7d, 0xe6, 0xf9, 0x6c, 0x44, 0x39, 0xb3, 0xc8, 0xfb, 0x50, 0xe2, 0x74, 0x1c, 0x4a, + 0x6f, 0xf5, 0xe5, 0x43, 0xa4, 0xff, 0xf1, 0xe1, 0x01, 0xb5, 0xfd, 0xc1, 0x45, 0x25, 0xbf, 0x25, + 0x30, 0x09, 0xe9, 0xc8, 0xa3, 0xff, 0x5d, 0x13, 0x95, 0x26, 0x95, 0x69, 0x72, 0x0f, 0x26, 0x0c, + 0xa7, 0x42, 0xe2, 0xf2, 0xf0, 0xf5, 0x0e, 0xeb, 0xff, 0x00, 0xc6, 0x34, 0x30, 0x9f, 0x51, 0x87, + 0x33, 0x4b, 0x9d, 0x58, 0x7d, 0x4c, 0x83, 0x9f, 0xe0, 0x84, 0xb8, 0x69, 0x09, 0xf2, 0x3c, 0x60, + 0x16, 0x1e, 0x5d, 0xd1, 0xa8, 0x8e, 0x69, 0xf0, 0x24, 0x60, 0x56, 0xc2, 0x6e, 0xd5, 0xd7, 0xb1, + 0x5b, 0xfa, 0xac, 0x6a, 0xd9, 0xb3, 0xfa, 0x97, 0x06, 0x97, 0x33, 0x9a, 0x27, 0x6c, 0xfa, 0x4d, + 0xb1, 0x41, 0x78, 0xb6, 0xd5, 0x57, 0x3f, 0xdb, 0xaf, 0xd0, 0xff, 0x9f, 0x89, 0x3c, 0x11, 0x5f, + 0x44, 0xbe, 0x1d, 0x67, 0xff, 0x6f, 0x0d, 0xde, 0x38, 0xa3, 0xfb, 0xb7, 0xea, 0xf4, 0xff, 0xaa, + 0x89, 0x5b, 0x68, 0xba, 0xe0, 0x93, 0x7d, 0x38, 0x17, 0x65, 0x6c, 0x73, 0x8e, 0x99, 0x3c, 0xcc, + 0x2a, 0x2f, 0x4f, 0xf4, 0x9d, 0xe3, 0xf4, 0x74, 0x40, 0x3e, 0x81, 0x4b, 0x99, 0x7a, 0x13, 0x09, + 0x2c, 0xbc, 0xb4, 0xec, 0x5c, 0x48, 0x97, 0x9d, 0x50, 0x5e, 0xec, 0x0f, 0xc5, 0xd7, 0xca, 0xa1, + 0x7f, 0xd3, 0xa0, 0x97, 0xd5, 0x37, 0x71, 0xe0, 0xdf, 0x60, 0xcd, 0x43, 0x1f, 0x28, 0xbe, 0x46, + 0x76, 0xff, 0x7f, 0xf1, 0x68, 0x49, 0x5e, 0xd0, 0xf2, 0xbc, 0x58, 0xff, 0x95, 0x06, 0xed, 0xcc, + 0x66, 0xc8, 0x6d, 0x00, 0x79, 0x7d, 
0x09, 0xec, 0xe7, 0x2c, 0x73, 0x53, 0x40, 0xe3, 0x3d, 0xb2, + 0x9f, 0x33, 0xb5, 0xf1, 0xfa, 0x30, 0x9c, 0x20, 0xef, 0x42, 0x8d, 0xa9, 0xc7, 0x94, 0xd2, 0xf6, + 0x42, 0xe6, 0x8d, 0xa5, 0x78, 0x22, 0x18, 0xf9, 0x2e, 0xd4, 0x23, 0x1b, 0x66, 0x1e, 0xd2, 0x91, + 0xc9, 0xc3, 0x85, 0x22, 0xa0, 0xfe, 0x11, 0xb4, 0x33, 0xdb, 0x20, 0x6f, 0x40, 0x7d, 0x46, 0x17, + 0xea, 0x45, 0x2c, 0xdf, 0x48, 0xb5, 0x19, 0x5d, 0xe0, 0x63, 0x98, 0x5c, 0x82, 0xaa, 0x20, 0x8e, + 0xa9, 0x3c, 0x85, 0xa2, 0x51, 0x99, 0xd1, 0xc5, 0x47, 0x34, 0xd0, 0x37, 0xa1, 0x95, 0xde, 0x5a, + 0x08, 0x0d, 0x6f, 0x9d, 0x12, 0xba, 0x3b, 0x66, 0xfa, 0x6d, 0x68, 0x67, 0x76, 0x44, 0x74, 0x68, + 0x7a, 0xf3, 0xa1, 0x79, 0xc4, 0x4e, 0x4c, 0xdc, 0x32, 0xfa, 0x4c, 0xdd, 0x68, 0x78, 0xf3, 0xe1, + 0xc7, 0xec, 0x44, 0x3c, 0xfa, 0x02, 0xfd, 0x11, 0xb4, 0xd2, 0x6f, 0x55, 0x71, 0x2f, 0xf1, 0xdd, + 0xb9, 0x63, 0xa1, 0xfc, 0xb2, 0x21, 0x07, 0xe4, 0x26, 0x94, 0x8f, 0x5d, 0xe9, 0x26, 0xc9, 0xc7, + 0xe9, 0xa1, 0xcb, 0x59, 0xe2, 0x85, 0x2b, 0x31, 0xba, 0x0d, 0x65, 0x74, 0x7d, 0x71, 0xa0, 0xf8, + 0xea, 0x54, 0xf7, 0x5c, 0xf1, 0x4d, 0x1e, 0x02, 0x50, 0xce, 0x7d, 0x7b, 0x38, 0x8f, 0xc5, 0x65, + 0x1d, 0xe7, 0x8a, 0x72, 0x9c, 0xb5, 0x18, 0x99, 0x70, 0x9f, 0x04, 0xbf, 0xfe, 0xf3, 0x32, 0x54, + 0xe4, 0x1b, 0x9d, 0xf4, 0xd3, 0x1d, 0x20, 0x21, 0x55, 0x6d, 0x52, 0xce, 0xaa, 0x3d, 0x46, 0xd7, + 0xea, 0xeb, 0xd9, 0x36, 0xca, 0xa0, 0x71, 0xfa, 0x62, 0xa3, 0x8a, 0x57, 0xd2, 0xfd, 0xfb, 0x71, + 0x4f, 0x65, 0x59, 0xcb, 0x21, 0x6c, 0xe0, 0x94, 0x5e, 0xb9, 0x81, 0x73, 0x09, 0xaa, 0xce, 0x7c, + 0x66, 0xf2, 0x45, 0xa0, 0x12, 0x6e, 0xc5, 0x99, 0xcf, 0x1e, 0x2f, 0xd0, 0x4b, 0xb8, 0xcb, 0xe9, + 0x14, 0x49, 0x32, 0xdd, 0xd6, 0x70, 0x42, 0x10, 0x77, 0xa0, 0x99, 0xb8, 0xb9, 0xdb, 0x96, 0x7a, + 0x01, 0xb6, 0x92, 0x8e, 0xbf, 0x7f, 0x5f, 0x69, 0xd9, 0x88, 0x6e, 0xf2, 0xfb, 0x16, 0xb9, 0x91, + 0xee, 0x57, 0xe0, 0x85, 0xbf, 0x86, 0x31, 0x96, 0x68, 0x49, 0x88, 0xeb, 0xbe, 0xd8, 0x80, 0x88, + 0x3a, 0x09, 0xa9, 0x23, 0xa4, 0x26, 0x26, 0x90, 0xf8, 0x26, 0xb4, 0xe3, 0x3b, 0xb3, 0x84, 0x80, + 0x94, 0x12, 0x4f, 0x23, 0xf0, 0x1d, 0x58, 0x73, 0xd8, 0x82, 0x9b, 0x59, 0x74, 0x03, 0xd1, 0x44, + 0xd0, 0x0e, 0xd3, 0x1c, 0xdf, 0x81, 0x56, 0x9c, 0x97, 0x10, 0xbb, 0x2a, 0xbb, 0x46, 0xd1, 0x2c, + 0xc2, 0x2e, 0x43, 0x2d, 0x7a, 0xb1, 0x34, 0x11, 0x50, 0xa5, 0xf2, 0xa1, 0x12, 0xbd, 0x81, 0x7c, + 0x16, 0xcc, 0xa7, 0x5c, 0x09, 0x69, 0x21, 0x06, 0xdf, 0x40, 0x86, 0x9c, 0x47, 0xec, 0x35, 0x68, + 0x86, 0x11, 0x2e, 0x71, 0x6d, 0xc4, 0xad, 0x86, 0x93, 0x08, 0xda, 0x84, 0x8e, 0xe7, 0xbb, 0x9e, + 0x1b, 0x30, 0xdf, 0xa4, 0x96, 0xe5, 0xb3, 0x20, 0xe8, 0x76, 0xa4, 0xbc, 0x70, 0x7e, 0x57, 0x4e, + 0xeb, 0xef, 0x42, 0x35, 0x7c, 0x8a, 0xad, 0x41, 0x19, 0xad, 0x8e, 0x2e, 0x58, 0x32, 0xe4, 0x40, + 0x94, 0xe2, 0x5d, 0xcf, 0x53, 0x8d, 0x47, 0xf1, 0xa9, 0xff, 0x0c, 0xaa, 0xea, 0xc0, 0x72, 0xdb, + 0x51, 0xdf, 0x87, 0x55, 0x8f, 0xfa, 0x42, 0x8d, 0x64, 0x53, 0x2a, 0x7c, 0xec, 0x1f, 0x50, 0x9f, + 0x3f, 0x62, 0x3c, 0xd5, 0x9b, 0x6a, 0x20, 0x5e, 0x4e, 0xe9, 0x77, 0xa0, 0x99, 0xc2, 0x88, 0x6d, + 0xa1, 0x1f, 0x85, 0x41, 0x8d, 0x83, 0x68, 0xe5, 0x42, 0xbc, 0xb2, 0x7e, 0x17, 0xea, 0xd1, 0xd9, + 0x88, 0x37, 0x69, 0xa8, 0xba, 0xa6, 0xcc, 0x2d, 0x87, 0xd8, 0x6f, 0x73, 0x9f, 0x31, 0x5f, 0xc5, + 0x84, 0x1c, 0xe8, 0x4f, 0x12, 0x49, 0x48, 0x96, 0x08, 0x72, 0x0b, 0xaa, 0x2a, 0x09, 0xa9, 0xa8, + 0x0c, 0x3b, 0x6b, 0x07, 0x98, 0x85, 0xc2, 0xce, 0x9a, 0xcc, 0x49, 0xb1, 0xd8, 0x42, 0x52, 0xec, + 0x14, 0x6a, 0x61, 0xa2, 0x49, 0x67, 0x64, 0x29, 0xb1, 0x93, 0xcd, 0xc8, 0x4a, 0x68, 0x0c, 0x14, + 0xde, 0x11, 0xd8, 0x63, 0x87, 0x59, 0x66, 0x1c, 0x42, 0xb8, 
0x46, 0xcd, 0x68, 0x4b, 0xc2, 0xc3, + 0x30, 0x5e, 0xf4, 0x77, 0xa0, 0x22, 0xf7, 0x96, 0x9b, 0xbe, 0xf2, 0x6a, 0xd4, 0xef, 0x34, 0xa8, + 0x85, 0x79, 0x3a, 0x97, 0x29, 0xb5, 0xe9, 0xc2, 0xd7, 0xdd, 0xf4, 0xff, 0x3e, 0xf1, 0xdc, 0x02, + 0x22, 0xf3, 0xcb, 0xb1, 0xcb, 0x6d, 0x67, 0x6c, 0x4a, 0x5b, 0xcb, 0x1c, 0xd4, 0x41, 0xca, 0x21, + 0x12, 0x0e, 0xc4, 0xfc, 0x5b, 0xd7, 0xa0, 0x91, 0x68, 0x10, 0x92, 0x2a, 0x14, 0x3f, 0x61, 0xcf, + 0x3a, 0x2b, 0xa4, 0x01, 0x55, 0x83, 0x61, 0xbb, 0xa7, 0xa3, 0x6d, 0x7f, 0x56, 0x86, 0xf6, 0xee, + 0xe0, 0xde, 0xfe, 0xae, 0xe7, 0x4d, 0xed, 0x11, 0xc5, 0xfe, 0xc0, 0x16, 0x94, 0xb0, 0x45, 0x92, + 0xf3, 0x53, 0x50, 0x2f, 0xaf, 0x57, 0x47, 0xb6, 0xa1, 0x8c, 0x9d, 0x12, 0x92, 0xf7, 0x8b, 0x50, + 0x2f, 0xb7, 0x65, 0x27, 0x16, 0x91, 0xbd, 0x94, 0xb3, 0x3f, 0x0c, 0xf5, 0xf2, 0xfa, 0x76, 0xe4, + 0x43, 0xa8, 0xc7, 0x2d, 0x8c, 0x65, 0x3f, 0x0f, 0xf5, 0x96, 0x76, 0xf0, 0x04, 0x7f, 0xfc, 0x14, + 0x59, 0xf6, 0x2b, 0x47, 0x6f, 0x69, 0xab, 0x8b, 0xec, 0x40, 0x35, 0x7c, 0xc4, 0xe6, 0xff, 0x80, + 0xd3, 0x5b, 0xd2, 0x5d, 0x13, 0xe6, 0x91, 0x5d, 0x89, 0xbc, 0x5f, 0x99, 0x7a, 0xb9, 0x2d, 0x40, + 0x72, 0x1b, 0x2a, 0xea, 0x56, 0x95, 0xfb, 0x23, 0x4e, 0x2f, 0xbf, 0x47, 0x26, 0x94, 0x8c, 0xfb, + 0x32, 0xcb, 0x7e, 0x09, 0xeb, 0x2d, 0xed, 0x55, 0x92, 0x5d, 0x80, 0x44, 0x73, 0x61, 0xe9, 0x4f, + 0x5c, 0xbd, 0xe5, 0x3d, 0x48, 0x72, 0x17, 0x6a, 0x71, 0x5f, 0x39, 0xff, 0x47, 0xab, 0xde, 0xb2, + 0xb6, 0xe0, 0xe0, 0xca, 0x3f, 0xfe, 0xb8, 0xae, 0xfd, 0xf2, 0x74, 0x5d, 0xfb, 0xe2, 0x74, 0x5d, + 0xfb, 0xf2, 0x74, 0x5d, 0xfb, 0xed, 0xe9, 0xba, 0xf6, 0x87, 0xd3, 0x75, 0xed, 0xd7, 0x7f, 0x5a, + 0xd7, 0x86, 0x15, 0x8c, 0x91, 0xf7, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x1d, 0xa3, 0x8f, + 0xa4, 0x1d, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index b48ff1e8b..b52c612b6 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -81,8 +81,14 @@ message RequestBeginBlock { repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false]; } +enum CheckTxType { + New = 0; + Recheck = 1; +} + message RequestCheckTx { bytes tx = 1; + CheckTxType type = 2; } message RequestDeliverTx { @@ -165,10 +171,25 @@ message ResponseQuery { } message ResponseBeginBlock { + repeated Event events = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; +} + +message ResponseBeginBlockDeprecated { repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; } message ResponseCheckTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5; + int64 gas_used = 6; + repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; + string codespace = 8; +} + +message ResponseCheckTxDeprecated { uint32 code = 1; bytes data = 2; string log = 3; // nondeterministic @@ -180,6 +201,17 @@ message ResponseCheckTx { } message ResponseDeliverTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5; + int64 gas_used = 6; + repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; + string codespace = 8; +} + +message ResponseDeliverTxDeprecated { uint32 code = 1; bytes data = 2; string log = 3; // nondeterministic @@ -191,6 +223,12 @@ message ResponseDeliverTx { } message ResponseEndBlock { + repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false]; + ConsensusParams 
consensus_param_updates = 2; + repeated Event events = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; +} + +message ResponseEndBlockDeprecated { repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false]; ConsensusParams consensus_param_updates = 2; repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; @@ -212,7 +250,6 @@ message ConsensusParams { ValidatorParams validator = 3; } -// BlockSize contains limits on the block size. message BlockSizeParams { // Note: must be greater than 0 int64 max_bytes = 1; @@ -236,6 +273,11 @@ message LastCommitInfo { repeated VoteInfo votes = 2 [(gogoproto.nullable)=false]; } +message Event { + string type = 1; + repeated common.KVPair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"]; +} + //---------------------------------------- // Blockchain Types diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 9375cc7f1..c289f48a7 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -1199,6 +1199,62 @@ func TestResponseBeginBlockMarshalTo(t *testing.T) { } } +func TestResponseBeginBlockDeprecatedProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlockDeprecated(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBeginBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseBeginBlockDeprecatedMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlockDeprecated(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBeginBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseCheckTxProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -1255,6 +1311,62 @@ func TestResponseCheckTxMarshalTo(t *testing.T) { } } +func TestResponseCheckTxDeprecatedProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseCheckTxDeprecated(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCheckTxDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + 
t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseCheckTxDeprecatedMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseCheckTxDeprecated(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseCheckTxDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseDeliverTxProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -1311,6 +1423,62 @@ func TestResponseDeliverTxMarshalTo(t *testing.T) { } } +func TestResponseDeliverTxDeprecatedProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTxDeprecated(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseDeliverTxDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseDeliverTxDeprecatedMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTxDeprecated(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseDeliverTxDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseEndBlockProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -1367,6 +1535,62 @@ func TestResponseEndBlockMarshalTo(t *testing.T) { } } +func TestResponseEndBlockDeprecatedProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := 
NewPopulatedResponseEndBlockDeprecated(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEndBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseEndBlockDeprecatedMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseEndBlockDeprecated(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseEndBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseCommitProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -1703,6 +1927,62 @@ func TestLastCommitInfoMarshalTo(t *testing.T) { } } +func TestEventProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Event{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestEventMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Event{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestHeaderProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2513,16 +2793,88 @@ func TestResponseInfoJSON(t *testing.T) { 
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestResponseSetOptionJSON(t *testing.T) { +func TestResponseSetOptionJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseSetOption(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseSetOption{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseInitChainJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseInitChain(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseInitChain{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseQueryJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseQuery(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseQuery{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseBeginBlockJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlock(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseBeginBlock{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseBeginBlockDeprecatedJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseSetOption(popr, true) + p := NewPopulatedResponseBeginBlockDeprecated(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseSetOption{} + msg := &ResponseBeginBlockDeprecated{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2531,16 +2883,16 @@ func TestResponseSetOptionJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestResponseInitChainJSON(t *testing.T) { +func TestResponseCheckTxJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInitChain(popr, true) + p := NewPopulatedResponseCheckTx(popr, true) 
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseInitChain{} + msg := &ResponseCheckTx{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2549,16 +2901,16 @@ func TestResponseInitChainJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestResponseQueryJSON(t *testing.T) { +func TestResponseCheckTxDeprecatedJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseQuery(popr, true) + p := NewPopulatedResponseCheckTxDeprecated(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseQuery{} + msg := &ResponseCheckTxDeprecated{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2567,16 +2919,16 @@ func TestResponseQueryJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestResponseBeginBlockJSON(t *testing.T) { +func TestResponseDeliverTxJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBeginBlock(popr, true) + p := NewPopulatedResponseDeliverTx(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseBeginBlock{} + msg := &ResponseDeliverTx{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2585,16 +2937,16 @@ func TestResponseBeginBlockJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestResponseCheckTxJSON(t *testing.T) { +func TestResponseDeliverTxDeprecatedJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCheckTx(popr, true) + p := NewPopulatedResponseDeliverTxDeprecated(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseCheckTx{} + msg := &ResponseDeliverTxDeprecated{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2603,16 +2955,16 @@ func TestResponseCheckTxJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestResponseDeliverTxJSON(t *testing.T) { +func TestResponseEndBlockJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseDeliverTx(popr, true) + p := NewPopulatedResponseEndBlock(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseDeliverTx{} + msg := &ResponseEndBlock{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2621,16 +2973,16 @@ func TestResponseDeliverTxJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func 
TestResponseEndBlockJSON(t *testing.T) { +func TestResponseEndBlockDeprecatedJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEndBlock(popr, true) + p := NewPopulatedResponseEndBlockDeprecated(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &ResponseEndBlock{} + msg := &ResponseEndBlockDeprecated{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2747,6 +3099,24 @@ func TestLastCommitInfoJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestEventJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Event{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestHeaderJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3497,6 +3867,34 @@ func TestResponseBeginBlockProtoCompactText(t *testing.T) { } } +func TestResponseBeginBlockDeprecatedProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlockDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseBeginBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseBeginBlockDeprecatedProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlockDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseBeginBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseCheckTxProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3525,6 +3923,34 @@ func TestResponseCheckTxProtoCompactText(t *testing.T) { } } +func TestResponseCheckTxDeprecatedProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseCheckTxDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseCheckTxDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseCheckTxDeprecatedProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseCheckTxDeprecated(popr, true) + dAtA := 
github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseCheckTxDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseDeliverTxProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3553,6 +3979,34 @@ func TestResponseDeliverTxProtoCompactText(t *testing.T) { } } +func TestResponseDeliverTxDeprecatedProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTxDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseDeliverTxDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseDeliverTxDeprecatedProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTxDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseDeliverTxDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseEndBlockProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3581,6 +4035,34 @@ func TestResponseEndBlockProtoCompactText(t *testing.T) { } } +func TestResponseEndBlockDeprecatedProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseEndBlockDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseEndBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseEndBlockDeprecatedProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseEndBlockDeprecated(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseEndBlockDeprecated{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseCommitProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3749,6 +4231,34 @@ func TestLastCommitInfoProtoCompactText(t *testing.T) { } } +func TestEventProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &Event{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestEventProtoCompactText(t *testing.T) { + seed 
:= time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &Event{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestHeaderProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4463,6 +4973,28 @@ func TestResponseBeginBlockSize(t *testing.T) { } } +func TestResponseBeginBlockDeprecatedSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseBeginBlockDeprecated(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestResponseCheckTxSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4485,6 +5017,28 @@ func TestResponseCheckTxSize(t *testing.T) { } } +func TestResponseCheckTxDeprecatedSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseCheckTxDeprecated(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestResponseDeliverTxSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4507,6 +5061,28 @@ func TestResponseDeliverTxSize(t *testing.T) { } } +func TestResponseDeliverTxDeprecatedSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseDeliverTxDeprecated(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestResponseEndBlockSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4529,6 +5105,28 @@ func TestResponseEndBlockSize(t *testing.T) { } } +func TestResponseEndBlockDeprecatedSize(t 
*testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseEndBlockDeprecated(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestResponseCommitSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4661,6 +5259,28 @@ func TestLastCommitInfoSize(t *testing.T) { } } +func TestEventSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestHeaderSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) diff --git a/abci/types/util.go b/abci/types/util.go index 3cde88232..c6db9b4e8 100644 --- a/abci/types/util.go +++ b/abci/types/util.go @@ -32,3 +32,50 @@ func (v ValidatorUpdates) Swap(i, j int) { v[i] = v[j] v[j] = v1 } + +//------------------------------------------------------------------------------ + +func ConvertDeprecatedDeliverTxResponse(deprecated *ResponseDeliverTxDeprecated) *ResponseDeliverTx { + if deprecated == nil { + return nil + } + return &ResponseDeliverTx{ + Code: deprecated.Code, + Data: deprecated.Data, + Log: deprecated.Log, + Info: deprecated.Info, + GasWanted: deprecated.GasWanted, + GasUsed: deprecated.GasUsed, + Events: []Event{{Attributes: deprecated.Tags}}, + Codespace: deprecated.Codespace, + XXX_NoUnkeyedLiteral: deprecated.XXX_NoUnkeyedLiteral, + XXX_unrecognized: deprecated.XXX_unrecognized, + XXX_sizecache: deprecated.XXX_sizecache, + } +} + +func ConvertDeprecatedBeginBlockResponse(deprecated *ResponseBeginBlockDeprecated) *ResponseBeginBlock { + if deprecated == nil { + return nil + } + return &ResponseBeginBlock{ + Events: []Event{{Attributes: deprecated.Tags}}, + XXX_NoUnkeyedLiteral: deprecated.XXX_NoUnkeyedLiteral, + XXX_unrecognized: deprecated.XXX_unrecognized, + XXX_sizecache: deprecated.XXX_sizecache, + } +} + +func ConvertDeprecatedEndBlockResponse(deprecated *ResponseEndBlockDeprecated) *ResponseEndBlock { + if deprecated == nil { + return nil + } + return &ResponseEndBlock{ + ValidatorUpdates: deprecated.ValidatorUpdates, + ConsensusParamUpdates: deprecated.ConsensusParamUpdates, + Events: []Event{{Attributes: deprecated.Tags}}, + XXX_NoUnkeyedLiteral: deprecated.XXX_NoUnkeyedLiteral, + XXX_unrecognized: deprecated.XXX_unrecognized, + XXX_sizecache: deprecated.XXX_sizecache, + } +} diff --git a/appveyor.yml 
b/appveyor.yml
index 1ddf8fdd2..4aa8c2abb 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -7,7 +7,6 @@ clone_folder: c:\go\path\src\github.com\tendermint\tendermint
 before_build:
 - cmd: set GOPATH=%GOROOT%\path
 - cmd: set PATH=%GOPATH%\bin;%PATH%
-- cmd: make get_vendor_deps
 build_script:
 - cmd: make test
 test: off
diff --git a/behaviour/peer_behaviour.go b/behaviour/peer_behaviour.go
new file mode 100644
index 000000000..f7cfd00f0
--- /dev/null
+++ b/behaviour/peer_behaviour.go
@@ -0,0 +1,49 @@
+package behaviour
+
+import (
+	"github.com/tendermint/tendermint/p2p"
+)
+
+// PeerBehaviour is a struct describing a behaviour a peer performed.
+// `peerID` identifies the peer and reason characterizes the specific
+// behaviour performed by the peer.
+type PeerBehaviour struct {
+	peerID p2p.ID
+	reason interface{}
+}
+
+type badMessage struct {
+	explanation string
+}
+
+// BadMessage returns a badMessage PeerBehaviour.
+func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour {
+	return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
+}
+
+type messageOutOfOrder struct {
+	explanation string
+}
+
+// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
+func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour {
+	return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
+}
+
+type consensusVote struct {
+	explanation string
+}
+
+// ConsensusVote returns a consensusVote PeerBehaviour.
+func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour {
+	return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
+}
+
+type blockPart struct {
+	explanation string
+}
+
+// BlockPart returns a blockPart PeerBehaviour.
+func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour {
+	return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
+}
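Because each constructor wraps its explanation in a distinct unexported reason type, two PeerBehaviour values compare equal exactly when the peer ID, the reason type, and the explanation all match. A minimal standalone sketch of that value semantics (the package name, peer ID, and explanation strings are illustrative assumptions, not part of this change):

package main

import (
	"fmt"

	bh "github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/p2p"
)

func main() {
	var peerID p2p.ID = "peer-1"

	// Built from the same inputs, these are equal comparable values.
	a := bh.BadMessage(peerID, "malformed block part")
	b := bh.BadMessage(peerID, "malformed block part")
	fmt.Println(a == b) // true

	// PeerBehaviour can therefore be used directly as a map key, which is
	// what the histogram comparison in reporter_test.go below relies on.
	histogram := map[bh.PeerBehaviour]int{}
	histogram[a]++
	histogram[b]++
	fmt.Println(histogram[a]) // 2
}

diff --git a/behaviour/reporter.go b/behaviour/reporter.go
new file mode 100644
index 000000000..f8a0693bf
--- /dev/null
+++ b/behaviour/reporter.go
@@ -0,0 +1,84 @@
+package behaviour
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/tendermint/tendermint/p2p"
+)
+
+// Reporter provides an interface for reactors to report the behaviour
+// of peers synchronously to other components.
+type Reporter interface {
+	Report(behaviour PeerBehaviour) error
+}
+
+// SwitchReporter reports peer behaviour to an internal Switch.
+type SwitchReporter struct {
+	sw *p2p.Switch
+}
+
+// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch.
+func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
+	return &SwitchReporter{
+		sw: sw,
+	}
+}
+
+// Report reports the behaviour of a peer to the Switch.
+func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
+	peer := spbr.sw.Peers().Get(behaviour.peerID)
+	if peer == nil {
+		return errors.New("peer not found")
+	}
+
+	switch reason := behaviour.reason.(type) {
+	case consensusVote, blockPart:
+		spbr.sw.MarkPeerAsGood(peer)
+	case badMessage:
+		spbr.sw.StopPeerForError(peer, reason.explanation)
+	case messageOutOfOrder:
+		spbr.sw.StopPeerForError(peer, reason.explanation)
+	default:
+		return errors.New("unknown reason reported")
+	}
+
+	return nil
+}
+
+// MockReporter is a concrete implementation of the Reporter
+// interface used in reactor tests to ensure reactors report the correct
+// behaviour in manufactured scenarios.
+type MockReporter struct {
+	mtx sync.RWMutex
+	pb  map[p2p.ID][]PeerBehaviour
+}
+
+// NewMockReporter returns a Reporter which records all reported
+// behaviours in memory.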
+func NewMockReporter() *MockReporter {
+	return &MockReporter{
+		pb: map[p2p.ID][]PeerBehaviour{},
+	}
+}
+
+// Report stores the PeerBehaviour produced by the peer identified by peerID.
+func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
+	mpbr.mtx.Lock()
+	defer mpbr.mtx.Unlock()
+	mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)
+	return nil
+}
+
+// GetBehaviours returns all behaviours reported on the peer identified by peerID.
+func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
+	mpbr.mtx.RLock()
+	defer mpbr.mtx.RUnlock()
+	if items, ok := mpbr.pb[peerID]; ok {
+		result := make([]PeerBehaviour, len(items))
+		copy(result, items)
+
+		return result
+	}
+	return []PeerBehaviour{}
+}
diff --git a/behaviour/reporter_test.go b/behaviour/reporter_test.go
new file mode 100644
index 000000000..eae94e7bd
--- /dev/null
+++ b/behaviour/reporter_test.go
@@ -0,0 +1,186 @@
+package behaviour_test
+
+import (
+	"sync"
+	"testing"
+
+	bh "github.com/tendermint/tendermint/behaviour"
+	"github.com/tendermint/tendermint/p2p"
+)
+
+// TestMockReporter tests the MockReporter's ability to store reported
+// peer behaviour in memory indexed by the peerID.
+func TestMockReporter(t *testing.T) {
+	var peerID p2p.ID = "MockPeer"
+	pr := bh.NewMockReporter()
+
+	behaviours := pr.GetBehaviours(peerID)
+	if len(behaviours) != 0 {
+		t.Error("Expected to have no behaviours reported")
+	}
+
+	badMessage := bh.BadMessage(peerID, "bad message")
+	pr.Report(badMessage)
+	behaviours = pr.GetBehaviours(peerID)
+	if len(behaviours) != 1 {
+		t.Error("Expected the peer to have one reported behaviour")
+	}
+
+	if behaviours[0] != badMessage {
+		t.Error("Expected Bad Message to have been reported")
+	}
+}
+
+type scriptItem struct {
+	peerID    p2p.ID
+	behaviour bh.PeerBehaviour
+}
+
+// equalBehaviours returns true if a and b contain the same PeerBehaviours with
+// the same frequencies, and false otherwise.
+func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
+	aHistogram := map[bh.PeerBehaviour]int{}
+	bHistogram := map[bh.PeerBehaviour]int{}
+
+	for _, behaviour := range a {
+		aHistogram[behaviour]++
+	}
+
+	for _, behaviour := range b {
+		bHistogram[behaviour]++
+	}
+
+	if len(aHistogram) != len(bHistogram) {
+		return false
+	}
+
+	for _, behaviour := range a {
+		if aHistogram[behaviour] != bHistogram[behaviour] {
+			return false
+		}
+	}
+
+	for _, behaviour := range b {
+		if bHistogram[behaviour] != aHistogram[behaviour] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// TestEqualPeerBehaviours tests that equalBehaviours can tell whether two
+// slices of peer behaviours contain the same behaviours with the same
+// frequencies.
+func TestEqualPeerBehaviours(t *testing.T) {
+	var (
+		peerID        p2p.ID = "MockPeer"
+		consensusVote        = bh.ConsensusVote(peerID, "voted")
+		blockPart            = bh.BlockPart(peerID, "blocked")
+		equals               = []struct {
+			left  []bh.PeerBehaviour
+			right []bh.PeerBehaviour
+		}{
+			// Empty sets
+			{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
+			// Single behaviours
+			{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
+			// Equal Frequencies
+			{[]bh.PeerBehaviour{consensusVote, consensusVote},
+				[]bh.PeerBehaviour{consensusVote, consensusVote}},
+			// Equal frequencies different orders
+			{[]bh.PeerBehaviour{consensusVote, blockPart},
+				[]bh.PeerBehaviour{blockPart, consensusVote}},
+		}
+		unequals = []struct {
+			left  []bh.PeerBehaviour
+			right []bh.PeerBehaviour
+		}{
+			// Comparing empty sets to non empty sets
+			{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
+			// Different behaviours
+			{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
+			// Same behaviour with different frequencies
+			{[]bh.PeerBehaviour{consensusVote},
+				[]bh.PeerBehaviour{consensusVote, consensusVote}},
+		}
+	)
+
+	for _, test := range equals {
+		if !equalBehaviours(test.left, test.right) {
+			t.Errorf("Expected %#v and %#v to be equal", test.left, test.right)
+		}
+	}
+
+	for _, test := range unequals {
+		if equalBehaviours(test.left, test.right) {
+			t.Errorf("Expected %#v and %#v to be unequal", test.left, test.right)
+		}
+	}
+}
+
+// TestMockPeerBehaviourReporterConcurrency constructs a scenario in which
+// multiple goroutines use the same MockReporter instance. This test
+// reproduces the conditions under which MockReporter will be used within
+// a Reactor's `Receive` method, to ensure the reporter is thread safe.
+func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
+	var (
+		behaviourScript = []struct {
+			peerID     p2p.ID
+			behaviours []bh.PeerBehaviour
+		}{
+			{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
+			{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
+			{"3", []bh.PeerBehaviour{bh.BlockPart("3", ""), bh.ConsensusVote("3", ""), bh.BlockPart("3", ""), bh.ConsensusVote("3", "")}},
+			{"4", []bh.PeerBehaviour{bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", "")}},
+			{"5", []bh.PeerBehaviour{bh.BlockPart("5", ""), bh.ConsensusVote("5", ""), bh.BlockPart("5", ""), bh.ConsensusVote("5", "")}},
+		}
+	)
+
+	var receiveWg sync.WaitGroup
+	pr := bh.NewMockReporter()
+	scriptItems := make(chan scriptItem)
+	done := make(chan int)
+	numConsumers := 3
+	for i := 0; i < numConsumers; i++ {
+		receiveWg.Add(1)
+		go func() {
+			defer receiveWg.Done()
+			for {
+				select {
+				case pb := <-scriptItems:
+					pr.Report(pb.behaviour)
+				case <-done:
+					return
+				}
+			}
+		}()
+	}
+
+	var sendingWg sync.WaitGroup
+	sendingWg.Add(1)
+	go func() {
+		defer sendingWg.Done()
+		for _, item := range behaviourScript {
+			for _, reason := range item.behaviours {
+				scriptItems <- scriptItem{item.peerID, reason}
+			}
+		}
+	}()
+
+	sendingWg.Wait()
+
+	for i := 0; i < numConsumers; i++ {
+		done <- 1
+	}
+
+	receiveWg.Wait()
+
+	for _, items := range behaviourScript {
+		reported := pr.GetBehaviours(items.peerID)
+		if !equalBehaviours(reported, items.behaviours) {
+			t.Errorf("Expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
+				items.peerID, items.behaviours, reported)
+		}
+	}
+}
diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go
index eff5c7349..64c0e72cf 100644
--- a/benchmarks/codec_test.go
+++
b/benchmarks/codec_test.go
@@ -14,7 +14,7 @@ import (
 
 func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
 	return p2p.DefaultNodeInfo{
-		ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
+		ProtocolVersion: p2p.ProtocolVersion{P2P: 1, Block: 2, App: 3},
 		ID_:             id,
 		Moniker:         "SOMENAME",
 		Network:         "SOMENAME",
diff --git a/blockchain/hot/candidate_test.go b/blockchain/hot/candidate_test.go
index d76665479..4146d1899 100644
--- a/blockchain/hot/candidate_test.go
+++ b/blockchain/hot/candidate_test.go
@@ -234,18 +234,13 @@ func TestPickFromDecayedSet(t *testing.T) {
 		sampleStream <- metricsEvent{Bad, p, 0}
 	}
 	candidates := candidatePool.pickFromDecayedSet(true)
+	total := make(map[p2p.ID]bool)
 	for _, c := range candidates {
-		for idx, tpid := range testPids {
+		for _, tpid := range testPids {
 			if *c == tpid {
-				if len(testPids) > 1 {
-					testPids = append(testPids[:idx], testPids[idx+1:]...)
-					break
-				} else {
-					testPids = nil
-					break
-				}
+				total[tpid] = true
 			}
 		}
 	}
-	assert.Nil(t, testPids)
+	assert.Equal(t, 100, len(total))
 }
diff --git a/blockchain/hot/pool.go b/blockchain/hot/pool.go
index 007351d36..4c59cdee0 100644
--- a/blockchain/hot/pool.go
+++ b/blockchain/hot/pool.go
@@ -8,11 +8,11 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/tendermint/tendermint/blockchain"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	st "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -61,7 +61,7 @@ type BlockPool struct {
 	mtx sync.Mutex
 	st SyncPattern
 	eventBus *types.EventBus
-	store *blockchain.BlockStore
+	store *store.BlockStore
 	blockExec *st.BlockExecutor
 
 	blockTimeout time.Duration
@@ -94,7 +94,7 @@ type BlockPool struct {
 	switchWg sync.WaitGroup
 }
 
-func NewBlockPool(store *blockchain.BlockStore, blockExec *st.BlockExecutor, state st.State, sendCh chan<- Message, st SyncPattern, blockTimeout time.Duration) *BlockPool {
+func NewBlockPool(store *store.BlockStore, blockExec *st.BlockExecutor, state st.State, sendCh chan<- Message, st SyncPattern, blockTimeout time.Duration) *BlockPool {
 	const capacity = 1000
 	sampleStream := make(chan metricsEvent, capacity)
 	candidates := NewCandidatePool(sampleStream)
diff --git a/blockchain/hot/pool_test.go b/blockchain/hot/pool_test.go
index a648b4f3d..a74e96bce 100644
--- a/blockchain/hot/pool_test.go
+++ b/blockchain/hot/pool_test.go
@@ -10,14 +10,15 @@ import (
 	"github.com/stretchr/testify/require"
 
 	abci "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
+	tmmock "github.com/tendermint/tendermint/mock"
 	"github.com/tendermint/tendermint/p2p/mock"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
 )
@@ -504,7 +505,7 @@ func newBlockchainPoolPair(logger log.Logger, genDoc *types.GenesisDoc, privVals
 	}
 	blockDB := dbm.NewMemDB()
 	stateDB := dbm.NewMemDB()
-	blockStore := blockchain.NewBlockStore(blockDB)
+	blockStore := store.NewBlockStore(blockDB)
 	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 	if err != nil {
 		panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
@@ -513,8 +514,10 @@ func
newBlockchainPoolPair(logger log.Logger, genDoc *types.GenesisDoc, privVals // Make the BlockPool itself. // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. - blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(), - sm.MockMempool{}, sm.MockEvidencePool{}, true) + db := dbm.NewMemDB() + blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + tmmock.Mempool{}, sm.MockEvidencePool{}, true) + sm.SaveState(db, state) // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { lastCommit := types.NewCommit(types.BlockID{}, nil) @@ -552,7 +555,7 @@ type BlockPoolPair struct { privVals []types.PrivValidator } -func nextBlock(state sm.State, blockStore *blockchain.BlockStore, blockExec *sm.BlockExecutor, pri types.PrivValidator) (*types.Block, *types.Commit, *types.VoteSet) { +func nextBlock(state sm.State, blockStore *store.BlockStore, blockExec *sm.BlockExecutor, pri types.PrivValidator) (*types.Block, *types.Commit, *types.VoteSet) { height := blockStore.Height() lastBlockMeta := blockStore.LoadBlockMeta(height) lastBlock := blockStore.LoadBlock(height) @@ -587,11 +590,11 @@ func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { return abci.ResponseEndBlock{} } -func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}} +func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + return abci.ResponseDeliverTx{} } -func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx { +func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { return abci.ResponseCheckTx{} } diff --git a/blockchain/hot/reactor.go b/blockchain/hot/reactor.go index 6643e0ee0..c9802837e 100644 --- a/blockchain/hot/reactor.go +++ b/blockchain/hot/reactor.go @@ -7,9 +7,9 @@ import ( "time" "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/store" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -54,7 +54,7 @@ type BlockchainReactor struct { type BlockChainOption func(*BlockchainReactor) // NewBlockChainReactor returns new reactor instance. 
-func NewBlockChainReactor(state sm.State, blockExec *sm.BlockExecutor, store *blockchain.BlockStore, hotSync, fastSync bool, blockTimeout time.Duration, options ...BlockChainOption) *BlockchainReactor { +func NewBlockChainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, hotSync, fastSync bool, blockTimeout time.Duration, options ...BlockChainOption) *BlockchainReactor { if state.LastBlockHeight != store.Height() { panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, @@ -197,7 +197,7 @@ func (hbcR *BlockchainReactor) switchRoutine() { case <-hbcR.pool.Quit(): return case <-switchToConsensusTicker.C: - if hbcR.pool.getSyncPattern() == Hot && hbcR.privValidator != nil && hbcR.pool.state.Validators.HasAddress(hbcR.privValidator.GetAddress()) { + if hbcR.pool.getSyncPattern() == Hot && hbcR.privValidator != nil && hbcR.pool.state.Validators.HasAddress(hbcR.privValidator.GetPubKey().Address()) { hbcR.Logger.Info("hot sync switching to consensus sync") conR, ok := hbcR.Switch.Reactor("CONSENSUS").(consensusReactor) if ok { diff --git a/blockchain/hot/reactor_test.go b/blockchain/hot/reactor_test.go index 2d9f58e9f..c0321e206 100644 --- a/blockchain/hot/reactor_test.go +++ b/blockchain/hot/reactor_test.go @@ -7,14 +7,15 @@ import ( "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -221,7 +222,7 @@ func newBlockchainReactorPair(logger log.Logger, genDoc *types.GenesisDoc, privV } blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - blockStore := blockchain.NewBlockStore(blockDB) + blockStore := store.NewBlockStore(blockDB) state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) if err != nil { panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) @@ -230,8 +231,10 @@ func newBlockchainReactorPair(logger log.Logger, genDoc *types.GenesisDoc, privV // Make the BlockPool itself. // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. 
- blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(), - sm.MockMempool{}, sm.MockEvidencePool{}, true) + db := dbm.NewMemDB() + blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.MockEvidencePool{}, true) + sm.SaveState(db, state) // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { lastCommit := types.NewCommit(types.BlockID{}, nil) diff --git a/blockchain/v0/codec.go b/blockchain/v0/codec.go new file mode 100644 index 000000000..4494f41aa --- /dev/null +++ b/blockchain/v0/codec.go @@ -0,0 +1,13 @@ +package v0 + +import ( + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" +) + +var cdc = amino.NewCodec() + +func init() { + RegisterBlockchainMessages(cdc) + types.RegisterBlockAmino(cdc) +} diff --git a/blockchain/pool.go b/blockchain/v0/pool.go similarity index 95% rename from blockchain/pool.go rename to blockchain/v0/pool.go index 29516a1b6..46b4b915a 100644 --- a/blockchain/pool.go +++ b/blockchain/v0/pool.go @@ -1,4 +1,4 @@ -package blockchain +package v0 import ( "errors" @@ -59,6 +59,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests are not at peer limits, we can probably switch to consensus reactor */ +// BlockPool keeps track of the fast sync peers, block requests and block responses. type BlockPool struct { cmn.BaseService startTime time.Time @@ -115,17 +116,18 @@ func (pool *BlockPool) makeRequestersRoutine() { } _, numPending, lenRequesters := pool.GetStatus() - if numPending >= maxPendingRequests { + switch { + case numPending >= maxPendingRequests: // sleep for a bit. time.Sleep(requestIntervalMS * time.Millisecond) // check for timed out peers pool.removeTimedoutPeers() - } else if lenRequesters >= maxTotalRequesters { + case lenRequesters >= maxTotalRequesters: // sleep for a bit. time.Sleep(requestIntervalMS * time.Millisecond) // check for timed out peers pool.removeTimedoutPeers() - } else { + default: // request for more blocks. pool.makeNextRequester() } @@ -188,6 +190,7 @@ func (pool *BlockPool) IsCaughtUp() bool { return isCaughtUp } +// PeekTwoBlocks returns blocks at pool.height and pool.height+1. // We need to see the second block's Commit to validate the first block. // So we peek two blocks at a time. // The caller will verify the commit. @@ -204,7 +207,7 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) return } -// Pop the first block at pool.height +// PopRequest pops the first block at pool.height. // It must have been validated by 'second'.Commit from PeekTwoBlocks(). func (pool *BlockPool) PopRequest() { pool.mtx.Lock() @@ -224,7 +227,7 @@ func (pool *BlockPool) PopRequest() { } } -// Invalidates the block at pool.height, +// RedoRequest invalidates the block at pool.height, // Remove the peer and redo request from others. // Returns the ID of the removed peer. func (pool *BlockPool) RedoRequest(height int64) p2p.ID { @@ -240,6 +243,7 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID { return peerID } +// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it. // TODO: ensure that blocks come in order for each peer. func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) { pool.mtx.Lock() @@ -569,9 +573,9 @@ func (bpr *bpRequester) reset() { // Tells bpRequester to pick another peer and try again. 
// NOTE: Nonblocking, and does nothing if another redo // was already requested. -func (bpr *bpRequester) redo(peerId p2p.ID) { +func (bpr *bpRequester) redo(peerID p2p.ID) { select { - case bpr.redoCh <- peerId: + case bpr.redoCh <- peerID: default: } } @@ -626,8 +630,8 @@ OUTER_LOOP: } } -//------------------------------------- - +// BlockRequest stores a block request identified by the block Height and the PeerID responsible for +// delivering the block type BlockRequest struct { Height int64 PeerID p2p.ID diff --git a/blockchain/pool_test.go b/blockchain/v0/pool_test.go similarity index 99% rename from blockchain/pool_test.go rename to blockchain/v0/pool_test.go index 01d7dba20..d741d59df 100644 --- a/blockchain/pool_test.go +++ b/blockchain/v0/pool_test.go @@ -1,4 +1,4 @@ -package blockchain +package v0 import ( "fmt" diff --git a/blockchain/reactor.go b/blockchain/v0/reactor.go similarity index 96% rename from blockchain/reactor.go rename to blockchain/v0/reactor.go index 4b44f745c..2889fe0f6 100644 --- a/blockchain/reactor.go +++ b/blockchain/v0/reactor.go @@ -1,4 +1,4 @@ -package blockchain +package v0 import ( "errors" @@ -10,6 +10,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -64,7 +65,7 @@ type BlockchainReactor struct { initialState sm.State blockExec *sm.BlockExecutor - store *BlockStore + store *store.BlockStore pool *BlockPool fastSync bool hotSyncReactor bool @@ -75,7 +76,7 @@ type BlockchainReactor struct { } // NewBlockchainReactor returns new reactor instance. -func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore, +func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, fastSync, hotSyncReactor, hotSync bool) *BlockchainReactor { if state.LastBlockHeight != store.Height() { @@ -168,9 +169,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor by sending our state to peer. func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) - if !peer.Send(BlockchainChannel, msgBytes) { - // doing nothing, will try later in `poolRoutine` - } + peer.Send(BlockchainChannel, msgBytes) + // it's OK if send fails. will try later in poolRoutine + // peer is added to the pool once we receive the first // bcStatusResponseMessage from the peer and call pool.SetPeerHeight } @@ -218,18 +219,13 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) switch msg := msg.(type) { case *bcBlockRequestMessage: - if queued := bcR.respondToPeer(msg, src); !queued { - // Unfortunately not queued since the queue is full. - } + bcR.respondToPeer(msg, src) case *bcBlockResponseMessage: bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) case *bcStatusRequestMessage: // Send peer our state. msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) - queued := src.TrySend(BlockchainChannel, msgBytes) - if !queued { - // sorry - } + src.TrySend(BlockchainChannel, msgBytes) case *bcStatusResponseMessage: // Got a peer status. Unverified. 
bcR.pool.SetPeerHeight(src.ID(), msg.Height) @@ -329,6 +325,9 @@ FOR_LOOP: // should only happen during testing } } + // else { + // should only happen during testing + // } break FOR_LOOP } @@ -431,6 +430,7 @@ type BlockchainMessage interface { ValidateBasic() error } +// RegisterBlockchainMessages registers the fast sync messages for amino encoding. func RegisterBlockchainMessages(cdc *amino.Codec) { cdc.RegisterInterface((*BlockchainMessage)(nil), nil) cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil) @@ -478,8 +478,8 @@ func (m *bcNoBlockResponseMessage) ValidateBasic() error { return nil } -func (brm *bcNoBlockResponseMessage) String() string { - return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height) +func (m *bcNoBlockResponseMessage) String() string { + return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height) } //------------------------------------- diff --git a/blockchain/reactor_test.go b/blockchain/v0/reactor_test.go similarity index 65% rename from blockchain/reactor_test.go rename to blockchain/v0/reactor_test.go index 6c46a4a38..4ef9c6590 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -1,4 +1,4 @@ -package blockchain +package v0 import ( "os" @@ -6,13 +6,16 @@ import ( "testing" "time" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/store" + "github.com/stretchr/testify/assert" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -42,24 +45,6 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G }, privValidators } -func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote { - addr := privVal.GetPubKey().Address() - idx, _ := valset.GetByAddress(addr) - vote := &types.Vote{ - ValidatorAddress: addr, - ValidatorIndex: idx, - Height: header.Height, - Round: 1, - Timestamp: tmtime.Now(), - Type: types.PrecommitType, - BlockID: blockID, - } - - privVal.SignVote(header.ChainID, vote) - - return vote -} - type BlockchainReactorPair struct { reactor *BlockchainReactor app proxy.AppConns @@ -75,24 +60,26 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(cmn.ErrorWrap(err, "error start app")) + panic(errors.Wrap(err, "error start app")) } blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - blockStore := NewBlockStore(blockDB) + blockStore := store.NewBlockStore(blockDB) state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) if err != nil { - panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) + panic(errors.Wrap(err, "error constructing state from genesis file")) } // Make the BlockchainReactor itself. // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. 
fastSync := true - blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(), - sm.MockMempool{}, sm.MockEvidencePool{}, true) + db := dbm.NewMemDB() + blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.MockEvidencePool{}, true) + sm.SaveState(db, state) // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { @@ -101,18 +88,22 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig() - lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote}) + vote, err := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVals[0], lastBlock.Header.ChainID) + if err != nil { + panic(err) + } + voteCommitSig := vote.CommitSig() + lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig}) } thisBlock := makeBlock(blockHeight, state, lastCommit) thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{thisBlock.Hash(), thisParts.Header()} + blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} state, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { - panic(cmn.ErrorWrap(err, "error apply block")) + panic(errors.Wrap(err, "error apply block")) } blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -255,6 +246,97 @@ func TestBadBlockStopsPeer(t *testing.T) { assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1) } +func TestBcBlockRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + request := bcBlockRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + nonResponseHeight int64 + expectErr bool + }{ + {"Valid Non-Response Message", 0, false}, + {"Valid Non-Response Message", 1, false}, + {"Invalid Non-Response Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight} + assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + request := bcStatusRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + responseHeight int64 + expectErr 
bool
+	}{
+		{"Valid Response Message", 0, false},
+		{"Valid Response Message", 1, false},
+		{"Invalid Response Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			response := bcStatusResponseMessage{Height: tc.responseHeight}
+			assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+//----------------------------------------------
+// utility funcs
+
+func makeTxs(height int64) (txs []types.Tx) {
+	for i := 0; i < 10; i++ {
+		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
+	}
+	return txs
+}
+
+func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
+	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
+	return block
+}
+
 type testApp struct {
 	abci.BaseApplication
 }
@@ -273,11 +355,11 @@ func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
 	return abci.ResponseEndBlock{}
 }
 
-func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
-	return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}}
+func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
+	return abci.ResponseDeliverTx{Events: []abci.Event{}}
 }
 
-func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx {
+func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
 	return abci.ResponseCheckTx{}
 }
 
diff --git a/blockchain/wire.go b/blockchain/v1/codec.go
similarity index 91%
rename from blockchain/wire.go
rename to blockchain/v1/codec.go
index 91156fa8f..efbea02a9 100644
--- a/blockchain/wire.go
+++ b/blockchain/v1/codec.go
@@ -1,4 +1,4 @@
-package blockchain
+package v1
 
 import (
 	"github.com/tendermint/go-amino"
diff --git a/blockchain/v1/peer.go b/blockchain/v1/peer.go
new file mode 100644
index 000000000..02b1b4fc1
--- /dev/null
+++ b/blockchain/v1/peer.go
@@ -0,0 +1,209 @@
+package v1
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	flow "github.com/tendermint/tendermint/libs/flowrate"
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
+)
+
+//--------
+// Peer
+
+// BpPeerParams stores the peer parameters that are used when creating a peer.
+type BpPeerParams struct {
+	timeout     time.Duration
+	minRecvRate int64
+	sampleRate  time.Duration
+	windowSize  time.Duration
+}
+
+// BpPeer is the data structure associated with a fast sync peer.
+type BpPeer struct {
+	logger log.Logger
+	ID     p2p.ID
+
+	Height                  int64                  // the peer reported height
+	NumPendingBlockRequests int                    // number of requests still waiting for block responses
+	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
+	blockResponseTimer      *time.Timer
+	recvMonitor             *flow.Monitor
+	params                  *BpPeerParams // parameters for timer and monitor
+
+	onErr func(err error, peerID p2p.ID) // function to call on error
+}
+
+// NewBpPeer creates a new peer.
+func NewBpPeer(
+	peerID p2p.ID, height int64, onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {
+
+	if params == nil {
+		params = BpPeerDefaultParams()
+	}
+	return &BpPeer{
+		ID:     peerID,
+		Height: height,
+		blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
+		logger: log.NewNopLogger(),
+		onErr:  onErr,
+		params: params,
+	}
+}
+
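An aside for reviewers, not part of the patch: a minimal sketch of this peer type in use, mirroring the package's own tests below. The peer ID, heights, sizes, and the empty error callback are illustrative.

// Sketch: create a peer with a custom response timeout, then record a
// block request; this arms the response timer and the flow-rate monitor.
params := &BpPeerParams{timeout: 10 * time.Second}
peer := NewBpPeer("peer1", 100,
	func(err error, peerID p2p.ID) { /* called on timeout or slow peer */ },
	params)
peer.SetLogger(log.TestingLogger())
peer.RequestSent(42)
// When the block arrives, AddBlock updates the monitor and, once no
// requests remain pending, stops the timer and monitor.
_ = peer.AddBlock(types.MakeBlock(42, []types.Tx{types.Tx("foo")}, nil, nil), 1024)

+// String returns a string representation of a peer.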
+func (peer *BpPeer) String() string {
+	return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
+}
+
+// SetLogger sets the logger of the peer.
+func (peer *BpPeer) SetLogger(l log.Logger) {
+	peer.logger = l
+}
+
+// Cleanup performs cleanup of the peer, removes blocks, requests, stops timer and monitor.
+func (peer *BpPeer) Cleanup() {
+	if peer.blockResponseTimer != nil {
+		peer.blockResponseTimer.Stop()
+	}
+	if peer.NumPendingBlockRequests != 0 {
+		peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
+	}
+	if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
+		peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
+	}
+	for h := range peer.blocks {
+		delete(peer.blocks, h)
+	}
+	peer.NumPendingBlockRequests = 0
+	peer.recvMonitor = nil
+}
+
+// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
+func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
+	block, ok := peer.blocks[height]
+	if !ok {
+		return nil, errMissingBlock
+	}
+	if block == nil {
+		return nil, errMissingBlock
+	}
+	return block, nil
+}
+
+// AddBlock adds a block at peer level. The block must be non-nil and recvSize non-negative.
+// The peer must have a pending request for this block.
+func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
+	if block == nil || recvSize < 0 {
+		panic("bad parameters")
+	}
+	existingBlock, ok := peer.blocks[block.Height]
+	if !ok {
+		peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
+		return errMissingBlock
+	}
+	if existingBlock != nil {
+		peer.logger.Error("already have a block for height", "height", block.Height)
+		return errDuplicateBlock
+	}
+	if peer.NumPendingBlockRequests == 0 {
+		panic("peer does not have pending requests")
+	}
+	peer.blocks[block.Height] = block
+	peer.NumPendingBlockRequests--
+	if peer.NumPendingBlockRequests == 0 {
+		peer.stopMonitor()
+		peer.stopBlockResponseTimer()
+	} else {
+		peer.recvMonitor.Update(recvSize)
+		peer.resetBlockResponseTimer()
+	}
+	return nil
+}
+
+// RemoveBlock removes the block at the given height.
+func (peer *BpPeer) RemoveBlock(height int64) {
+	delete(peer.blocks, height)
+}
+
+// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
+func (peer *BpPeer) RequestSent(height int64) {
+	peer.blocks[height] = nil
+
+	if peer.NumPendingBlockRequests == 0 {
+		peer.startMonitor()
+		peer.resetBlockResponseTimer()
+	}
+	peer.NumPendingBlockRequests++
+}
+
+// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
+func (peer *BpPeer) CheckRate() error {
+	if peer.NumPendingBlockRequests == 0 {
+		return nil
+	}
+	curRate := peer.recvMonitor.Status().CurRate
+	// curRate can be 0 on start
+	if curRate != 0 && curRate < peer.params.minRecvRate {
+		err := errSlowPeer
+		peer.logger.Error("SendTimeout", "peer", peer,
+			"reason", err,
+			"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
+			"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
+		return err
+	}
+	return nil
+}
+
+func (peer *BpPeer) onTimeout() {
+	peer.onErr(errNoPeerResponse, peer.ID)
+}
+
+func (peer *BpPeer) stopMonitor() {
+	peer.recvMonitor.Done()
+	peer.recvMonitor = nil
+}
+
+func (peer *BpPeer) startMonitor() {
+	peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
+	initialValue := float64(peer.params.minRecvRate) * math.E
+	peer.recvMonitor.SetREMA(initialValue)
+}
+
+func (peer *BpPeer) resetBlockResponseTimer() {
+	if peer.blockResponseTimer == nil {
+		peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
+	} else {
+		peer.blockResponseTimer.Reset(peer.params.timeout)
+	}
+}
+
+func (peer *BpPeer) stopBlockResponseTimer() bool {
+	if peer.blockResponseTimer == nil {
+		return false
+	}
+	return peer.blockResponseTimer.Stop()
+}
+
+// BpPeerDefaultParams returns the default peer parameters.
+func BpPeerDefaultParams() *BpPeerParams {
+	return &BpPeerParams{
+		// Timeout for a peer to respond to a block request.
+		timeout: 15 * time.Second,
+
+		// Minimum recv rate to ensure we're receiving blocks from a peer fast
+		// enough. If a peer is not sending data at at least that rate, we
+		// consider them to have timed out and we disconnect.
+		//
+		// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
+		// sending data across the Atlantic ~ 7.5 KB/s.
+ minRecvRate: int64(7680), + + // Monitor parameters + sampleRate: time.Second, + windowSize: 40 * time.Second, + } +} diff --git a/blockchain/v1/peer_test.go b/blockchain/v1/peer_test.go new file mode 100644 index 000000000..3c19e4efd --- /dev/null +++ b/blockchain/v1/peer_test.go @@ -0,0 +1,278 @@ +package v1 + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +func TestPeerMonitor(t *testing.T) { + peer := NewBpPeer( + p2p.ID(cmn.RandStr(12)), 10, + func(err error, _ p2p.ID) {}, + nil) + peer.SetLogger(log.TestingLogger()) + peer.startMonitor() + assert.NotNil(t, peer.recvMonitor) + peer.stopMonitor() + assert.Nil(t, peer.recvMonitor) +} + +func TestPeerResetBlockResponseTimer(t *testing.T) { + var ( + numErrFuncCalls int // number of calls to the errFunc + lastErr error // last generated error + peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine + ) + params := &BpPeerParams{timeout: 2 * time.Millisecond} + + peer := NewBpPeer( + p2p.ID(cmn.RandStr(12)), 10, + func(err error, _ p2p.ID) { + peerTestMtx.Lock() + defer peerTestMtx.Unlock() + lastErr = err + numErrFuncCalls++ + }, + params) + + peer.SetLogger(log.TestingLogger()) + checkByStoppingPeerTimer(t, peer, false) + + // initial reset call with peer having a nil timer + peer.resetBlockResponseTimer() + assert.NotNil(t, peer.blockResponseTimer) + // make sure timer is running and stop it + checkByStoppingPeerTimer(t, peer, true) + + // reset with running timer + peer.resetBlockResponseTimer() + time.Sleep(time.Millisecond) + peer.resetBlockResponseTimer() + assert.NotNil(t, peer.blockResponseTimer) + + // let the timer expire and ... + time.Sleep(3 * time.Millisecond) + // ... check timer is not running + checkByStoppingPeerTimer(t, peer, false) + + peerTestMtx.Lock() + // ... 
check errNoPeerResponse has been sent + assert.Equal(t, 1, numErrFuncCalls) + assert.Equal(t, lastErr, errNoPeerResponse) + peerTestMtx.Unlock() +} + +func TestPeerRequestSent(t *testing.T) { + params := &BpPeerParams{timeout: 2 * time.Millisecond} + + peer := NewBpPeer( + p2p.ID(cmn.RandStr(12)), 10, + func(err error, _ p2p.ID) {}, + params) + + peer.SetLogger(log.TestingLogger()) + + peer.RequestSent(1) + assert.NotNil(t, peer.recvMonitor) + assert.NotNil(t, peer.blockResponseTimer) + assert.Equal(t, 1, peer.NumPendingBlockRequests) + + peer.RequestSent(1) + assert.NotNil(t, peer.recvMonitor) + assert.NotNil(t, peer.blockResponseTimer) + assert.Equal(t, 2, peer.NumPendingBlockRequests) +} + +func TestPeerGetAndRemoveBlock(t *testing.T) { + peer := NewBpPeer( + p2p.ID(cmn.RandStr(12)), 100, + func(err error, _ p2p.ID) {}, + nil) + + // Change peer height + peer.Height = int64(10) + assert.Equal(t, int64(10), peer.Height) + + // request some blocks and receive few of them + for i := 1; i <= 10; i++ { + peer.RequestSent(int64(i)) + if i > 5 { + // only receive blocks 1..5 + continue + } + _ = peer.AddBlock(makeSmallBlock(i), 10) + } + + tests := []struct { + name string + height int64 + wantErr error + blockPresent bool + }{ + {"no request", 100, errMissingBlock, false}, + {"no block", 6, errMissingBlock, false}, + {"block 1 present", 1, nil, true}, + {"block max present", 5, nil, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // try to get the block + b, err := peer.BlockAtHeight(tt.height) + assert.Equal(t, tt.wantErr, err) + assert.Equal(t, tt.blockPresent, b != nil) + + // remove the block + peer.RemoveBlock(tt.height) + _, err = peer.BlockAtHeight(tt.height) + assert.Equal(t, errMissingBlock, err) + }) + } +} + +func TestPeerAddBlock(t *testing.T) { + peer := NewBpPeer( + p2p.ID(cmn.RandStr(12)), 100, + func(err error, _ p2p.ID) {}, + nil) + + // request some blocks, receive one + for i := 1; i <= 10; i++ { + peer.RequestSent(int64(i)) + if i == 5 { + // receive block 5 + _ = peer.AddBlock(makeSmallBlock(i), 10) + } + } + + tests := []struct { + name string + height int64 + wantErr error + blockPresent bool + }{ + {"no request", 50, errMissingBlock, false}, + {"duplicate block", 5, errDuplicateBlock, true}, + {"block 1 successfully received", 1, nil, true}, + {"block max successfully received", 10, nil, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // try to get the block + err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10) + assert.Equal(t, tt.wantErr, err) + _, err = peer.BlockAtHeight(tt.height) + assert.Equal(t, tt.blockPresent, err == nil) + }) + } +} + +func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) { + + params := &BpPeerParams{timeout: 2 * time.Millisecond} + var ( + numErrFuncCalls int // number of calls to the onErr function + lastErr error // last generated error + peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine + ) + + peer := NewBpPeer( + p2p.ID(cmn.RandStr(12)), 10, + func(err error, _ p2p.ID) { + peerTestMtx.Lock() + defer peerTestMtx.Unlock() + lastErr = err + numErrFuncCalls++ + }, + params) + + peer.SetLogger(log.TestingLogger()) + + peer.RequestSent(1) + time.Sleep(4 * time.Millisecond) + // timer should have expired by now, check that the on error function was called + peerTestMtx.Lock() + assert.Equal(t, 1, numErrFuncCalls) + assert.Equal(t, errNoPeerResponse, lastErr) + peerTestMtx.Unlock() +} + +func TestPeerCheckRate(t 
*testing.T) {
+	params := &BpPeerParams{
+		timeout:     time.Second,
+		minRecvRate: int64(100), // 100 bytes/sec exponential moving average
+	}
+	peer := NewBpPeer(
+		p2p.ID(cmn.RandStr(12)), 10,
+		func(err error, _ p2p.ID) {},
+		params)
+	peer.SetLogger(log.TestingLogger())
+
+	require.Nil(t, peer.CheckRate())
+
+	for i := 0; i < 40; i++ {
+		peer.RequestSent(int64(i))
+	}
+
+	// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
+	time.Sleep(900 * time.Millisecond)
+
+	// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
+	for i := 0; i < 10; i++ {
+		_ = peer.AddBlock(makeSmallBlock(i), 11)
+		time.Sleep(100 * time.Millisecond)
+		require.Nil(t, peer.CheckRate())
+	}
+
+	// slow peer - send a bit less than 10 bytes/100msec
+	for i := 10; i < 20; i++ {
+		_ = peer.AddBlock(makeSmallBlock(i), 9)
+		time.Sleep(100 * time.Millisecond)
+	}
+	// check peer is considered slow
+	assert.Equal(t, errSlowPeer, peer.CheckRate())
+}
+
+func TestPeerCleanup(t *testing.T) {
+	params := &BpPeerParams{timeout: 2 * time.Millisecond}
+
+	peer := NewBpPeer(
+		p2p.ID(cmn.RandStr(12)), 10,
+		func(err error, _ p2p.ID) {},
+		params)
+	peer.SetLogger(log.TestingLogger())
+
+	assert.Nil(t, peer.blockResponseTimer)
+	peer.RequestSent(1)
+	assert.NotNil(t, peer.blockResponseTimer)
+
+	peer.Cleanup()
+	checkByStoppingPeerTimer(t, peer, false)
+}
+
+// Check if peer timer is running or not (a running timer can be successfully stopped).
+// Note: stops the timer.
+func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
+	assert.NotPanics(t, func() {
+		stopped := peer.stopBlockResponseTimer()
+		if running {
+			assert.True(t, stopped)
+		} else {
+			assert.False(t, stopped)
+		}
+	})
+}
+
+func makeSmallBlock(height int) *types.Block {
+	return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
+}
diff --git a/blockchain/v1/pool.go b/blockchain/v1/pool.go
new file mode 100644
index 000000000..5de741305
--- /dev/null
+++ b/blockchain/v1/pool.go
@@ -0,0 +1,369 @@
+package v1
+
+import (
+	"sort"
+
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
+)
+
+// BlockPool keeps track of the fast sync peers, block requests and block responses.
+type BlockPool struct {
+	logger log.Logger
+	// Set of peers that have sent status responses, with height bigger than pool.Height
+	peers map[p2p.ID]*BpPeer
+	// Set of block heights and the corresponding peers from where a block response is expected or has been received.
+	blocks map[int64]p2p.ID
+
+	plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
+	nextRequestHeight int64              // next height to be added to plannedRequests
+
+	Height        int64 // height of next block to execute
+	MaxPeerHeight int64 // maximum height of all peers
+	toBcR         bcReactor
+}
+
+// NewBlockPool creates a new BlockPool.
+func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
+	return &BlockPool{
+		Height:            height,
+		MaxPeerHeight:     0,
+		peers:             make(map[p2p.ID]*BpPeer),
+		blocks:            make(map[int64]p2p.ID),
+		plannedRequests:   make(map[int64]struct{}),
+		nextRequestHeight: height,
+		toBcR:             toBcR,
+	}
+}
+
+// SetLogger sets the logger of the pool.
+func (pool *BlockPool) SetLogger(l log.Logger) {
+	pool.logger = l
+}
+
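An aside for reviewers, not part of the patch: a minimal sketch of the intended pool lifecycle, driven by a reactor implementing the bcReactor interface this package expects. `myReactor` and `logger` are hypothetical; heights and peer IDs are illustrative.

// Sketch: the reactor feeds peer status into the pool, then asks it
// to schedule block requests.
pool := NewBlockPool(1, myReactor) // myReactor: hypothetical bcReactor
pool.SetLogger(logger)             // logger: hypothetical
_ = pool.UpdatePeer("P1", 150)     // errPeerTooShort if height < pool.Height
pool.MakeNextRequests(maxNumRequests) // sends requests via toBcR.sendBlockRequest

+// ReachedMaxHeight checks whether the pool has reached the maximum peer height.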
+func (pool *BlockPool) ReachedMaxHeight() bool { + return pool.Height >= pool.MaxPeerHeight +} + +func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) { + pool.logger.Info("reschedule requests made to peer for height ", "peerID", peerID, "height", height) + pool.plannedRequests[height] = struct{}{} + delete(pool.blocks, height) + pool.peers[peerID].RemoveBlock(height) +} + +// Updates the pool's max height. If no peers are left MaxPeerHeight is set to 0. +func (pool *BlockPool) updateMaxPeerHeight() { + var newMax int64 + for _, peer := range pool.peers { + peerHeight := peer.Height + if peerHeight > newMax { + newMax = peerHeight + } + } + pool.MaxPeerHeight = newMax +} + +// UpdatePeer adds a new peer or updates an existing peer with a new height. +// If a peer is short it is not added. +func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { + + peer := pool.peers[peerID] + + if peer == nil { + if height < pool.Height { + pool.logger.Info("Peer height too small", + "peer", peerID, "height", height, "fsm_height", pool.Height) + return errPeerTooShort + } + // Add new peer. + peer = NewBpPeer(peerID, height, pool.toBcR.sendPeerError, nil) + peer.SetLogger(pool.logger.With("peer", peerID)) + pool.peers[peerID] = peer + pool.logger.Info("added peer", "peerID", peerID, "height", height, "num_peers", len(pool.peers)) + } else { + // Check if peer is lowering its height. This is not allowed. + if height < peer.Height { + pool.RemovePeer(peerID, errPeerLowersItsHeight) + return errPeerLowersItsHeight + } + // Update existing peer. + peer.Height = height + } + + // Update the pool's MaxPeerHeight if needed. + pool.updateMaxPeerHeight() + + return nil +} + +// Cleans and deletes the peer. Recomputes the max peer height. +func (pool *BlockPool) deletePeer(peer *BpPeer) { + if peer == nil { + return + } + peer.Cleanup() + delete(pool.peers, peer.ID) + + if peer.Height == pool.MaxPeerHeight { + pool.updateMaxPeerHeight() + } +} + +// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer. +func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) { + peer := pool.peers[peerID] + if peer == nil { + return + } + pool.logger.Info("removing peer", "peerID", peerID, "error", err) + + // Reschedule the block requests made to the peer, or received and not processed yet. + // Note that some of the requests may be removed further down. + for h := range pool.peers[peerID].blocks { + pool.rescheduleRequest(peerID, h) + } + + oldMaxPeerHeight := pool.MaxPeerHeight + // Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered. + pool.deletePeer(peer) + + // Check if the pool's MaxPeerHeight has been lowered. + // This may happen if the tallest peer has been removed. + if oldMaxPeerHeight > pool.MaxPeerHeight { + // Remove any planned requests for heights over the new MaxPeerHeight. + for h := range pool.plannedRequests { + if h > pool.MaxPeerHeight { + delete(pool.plannedRequests, h) + } + } + // Adjust the nextRequestHeight to the new max plus one. 
+ if pool.nextRequestHeight > pool.MaxPeerHeight { + pool.nextRequestHeight = pool.MaxPeerHeight + 1 + } + } +} + +func (pool *BlockPool) removeShortPeers() { + for _, peer := range pool.peers { + if peer.Height < pool.Height { + pool.RemovePeer(peer.ID, nil) + } + } +} + +func (pool *BlockPool) removeBadPeers() { + pool.removeShortPeers() + for _, peer := range pool.peers { + if err := peer.CheckRate(); err != nil { + pool.RemovePeer(peer.ID, err) + pool.toBcR.sendPeerError(err, peer.ID) + } + } +} + +// MakeNextRequests creates more requests if the block pool is running low. +func (pool *BlockPool) MakeNextRequests(maxNumRequests int) { + heights := pool.makeRequestBatch(maxNumRequests) + if len(heights) != 0 { + pool.logger.Info("makeNextRequests will make following requests", + "number", len(heights), "heights", heights) + } + + for _, height := range heights { + h := int64(height) + if !pool.sendRequest(h) { + // If a good peer was not found for sending the request at height h then return, + // as it shouldn't be possible to find a peer for h+1. + return + } + delete(pool.plannedRequests, h) + } +} + +// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries. +func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int { + pool.removeBadPeers() + // At this point pool.requests may include heights for requests to be redone due to removal of peers: + // - peers timed out or were removed by switch + // - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1 + // Determine the number of requests needed by subtracting the number of requests already made from the maximum + // allowed + numNeeded := int(maxNumRequests) - len(pool.blocks) + for len(pool.plannedRequests) < numNeeded { + if pool.nextRequestHeight > pool.MaxPeerHeight { + break + } + pool.plannedRequests[pool.nextRequestHeight] = struct{}{} + pool.nextRequestHeight++ + } + + heights := make([]int, 0, len(pool.plannedRequests)) + for k := range pool.plannedRequests { + heights = append(heights, int(k)) + } + sort.Ints(heights) + return heights +} + +func (pool *BlockPool) sendRequest(height int64) bool { + for _, peer := range pool.peers { + if peer.NumPendingBlockRequests >= maxRequestsPerPeer { + continue + } + if peer.Height < height { + continue + } + + err := pool.toBcR.sendBlockRequest(peer.ID, height) + if err == errNilPeerForBlockRequest { + // Switch does not have this peer, remove it and continue to look for another peer. + pool.logger.Error("switch does not have peer..removing peer selected for height", "peer", + peer.ID, "height", height) + pool.RemovePeer(peer.ID, err) + continue + } + + if err == errSendQueueFull { + pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height) + continue + } + + pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height) + + pool.blocks[height] = peer.ID + peer.RequestSent(height) + + return true + } + pool.logger.Error("could not find peer to send request for block at height", "height", height) + return false +} + +// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map. 
+func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
+	peer, ok := pool.peers[peerID]
+	if !ok {
+		pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
+		return errBadDataFromPeer
+	}
+	if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
+		pool.logger.Error("block received from wrong peer", "height", block.Height,
+			"peer", peerID, "expected_peer", wantPeerID)
+		return errBadDataFromPeer
+	}
+
+	return peer.AddBlock(block, blockSize)
+}
+
+// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
+type BlockData struct {
+	block *types.Block
+	peer  *BpPeer
+}
+
+// BlockAndPeerAtHeight retrieves the block and delivery peer at the specified height.
+// Returns errMissingBlock if the block was not found.
+func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
+	peerID := pool.blocks[height]
+	peer := pool.peers[peerID]
+	if peer == nil {
+		return nil, errMissingBlock
+	}
+
+	block, err := peer.BlockAtHeight(height)
+	if err != nil {
+		return nil, err
+	}
+
+	return &BlockData{peer: peer, block: block}, nil
+
+}
+
+// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
+func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
+	first, err = pool.BlockAndPeerAtHeight(pool.Height)
+	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
+	if err == nil {
+		err = err2
+	}
+	return
+}
+
+// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks; blocks are removed by RemovePeer().
+func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
+	first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
+	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
+
+	if err1 == nil {
+		pool.RemovePeer(first.peer.ID, err)
+	}
+	if err2 == nil {
+		pool.RemovePeer(second.peer.ID, err)
+	}
+}
+
+// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes the block at pool height and
+// the peers that are now short.
+func (pool *BlockPool) ProcessedCurrentHeightBlock() {
+	peerID, peerOk := pool.blocks[pool.Height]
+	if peerOk {
+		pool.peers[peerID].RemoveBlock(pool.Height)
+	}
+	delete(pool.blocks, pool.Height)
+	pool.logger.Debug("removed block at height", "height", pool.Height)
+	pool.Height++
+	pool.removeShortPeers()
+}
+
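An aside for reviewers, not part of the patch: a sketch of how the sync FSM is expected to consume these helpers to advance by one height. `verifyCommit` is a hypothetical stand-in for the caller's validation of the first block against the second block's commit.

// Sketch: advance the pool by one height.
first, second, err := pool.FirstTwoBlocksAndPeers()
if err != nil {
	return // blocks at H and H+1 are not both available yet
}
if err := verifyCommit(first.block, second.block); err != nil { // hypothetical
	// Either sender may be at fault: drop both peers and reschedule their blocks.
	pool.InvalidateFirstTwoBlocks(err)
	return
}
// The block at pool.Height is now trusted: remove it and advance.
pool.ProcessedCurrentHeightBlock()

+// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
+// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
+// This function is called when the FSM is not able to make progress for some time.
+// This happens if either the block H or H+1 has not been delivered.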
+func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) { + peerID := pool.blocks[pool.Height] + peer, ok := pool.peers[peerID] + if ok { + if _, err := peer.BlockAtHeight(pool.Height); err != nil { + pool.logger.Info("remove peer that hasn't sent block at pool.Height", + "peer", peerID, "height", pool.Height) + pool.RemovePeer(peerID, err) + return + } + } + peerID = pool.blocks[pool.Height+1] + peer, ok = pool.peers[peerID] + if ok { + if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil { + pool.logger.Info("remove peer that hasn't sent block at pool.Height+1", + "peer", peerID, "height", pool.Height+1) + pool.RemovePeer(peerID, err) + return + } + } +} + +// Cleanup performs pool and peer cleanup +func (pool *BlockPool) Cleanup() { + for id, peer := range pool.peers { + peer.Cleanup() + delete(pool.peers, id) + } + pool.plannedRequests = make(map[int64]struct{}) + pool.blocks = make(map[int64]p2p.ID) + pool.nextRequestHeight = 0 + pool.Height = 0 + pool.MaxPeerHeight = 0 +} + +// NumPeers returns the number of peers in the pool +func (pool *BlockPool) NumPeers() int { + return len(pool.peers) +} + +// NeedsBlocks returns true if more blocks are required. +func (pool *BlockPool) NeedsBlocks() bool { + return len(pool.blocks) < maxNumRequests +} diff --git a/blockchain/v1/pool_test.go b/blockchain/v1/pool_test.go new file mode 100644 index 000000000..437fc7e05 --- /dev/null +++ b/blockchain/v1/pool_test.go @@ -0,0 +1,650 @@ +package v1 + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +type testPeer struct { + id p2p.ID + height int64 +} + +type testBcR struct { + logger log.Logger +} + +type testValues struct { + numRequestsSent int +} + +var testResults testValues + +func resetPoolTestResults() { + testResults.numRequestsSent = 0 +} + +func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) { +} + +func (testR *testBcR) sendStatusRequest() { +} + +func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error { + testResults.numRequestsSent++ + return nil +} + +func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) { +} + +func (testR *testBcR) switchToConsensusOrHotSync() { + +} + +func newTestBcR() *testBcR { + testBcR := &testBcR{logger: log.TestingLogger()} + return testBcR +} + +type tPBlocks struct { + id p2p.ID + create bool +} + +// Makes a block pool with specified current height, list of peers, block requests and block responses +func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool { + bPool := NewBlockPool(height, bcr) + bPool.SetLogger(bcr.logger) + + txs := []types.Tx{types.Tx("foo"), types.Tx("bar")} + + var maxH int64 + for _, p := range peers { + if p.Height > maxH { + maxH = p.Height + } + bPool.peers[p.ID] = NewBpPeer(p.ID, p.Height, bcr.sendPeerError, nil) + bPool.peers[p.ID].SetLogger(bcr.logger) + + } + bPool.MaxPeerHeight = maxH + for h, p := range blocks { + bPool.blocks[h] = p.id + bPool.peers[p.id].RequestSent(int64(h)) + if p.create { + // simulate that a block at height h has been received + _ = bPool.peers[p.id].AddBlock(types.MakeBlock(int64(h), txs, nil, nil), 100) + } + } + return bPool +} + +func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) { + assert.Equal(t, len(set1), len(set2)) + for peerID, peer1 := range set1 { + peer2 := 
set2[peerID] + assert.NotNil(t, peer2) + assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests) + assert.Equal(t, peer1.Height, peer2.Height) + assert.Equal(t, len(peer1.blocks), len(peer2.blocks)) + for h, block1 := range peer1.blocks { + block2 := peer2.blocks[h] + // block1 and block2 could be nil if a request was made but no block was received + assert.Equal(t, block1, block2) + } + } +} + +func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) { + assert.Equal(t, poolWanted.blocks, pool.blocks) + assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers) + assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight) + assert.Equal(t, poolWanted.Height, pool.Height) + +} + +func TestBlockPoolUpdatePeer(t *testing.T) { + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + args testPeer + poolWanted *BlockPool + errWanted error + }{ + { + name: "add a first short peer", + pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + args: testPeer{"P1", 50}, + errWanted: errPeerTooShort, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + { + name: "add a first good peer", + pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + args: testPeer{"P1", 101}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}), + }, + { + name: "increase the height of P1 from 120 to 123", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), + args: testPeer{"P1", 123}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}), + }, + { + name: "decrease the height of P1 from 120 to 110", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), + args: testPeer{"P1", 110}, + errWanted: errPeerLowersItsHeight, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + { + name: "decrease the height of P1 from 105 to 102 with blocks", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}}, + map[int64]tPBlocks{ + 100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}), + args: testPeer{"P1", 102}, + errWanted: errPeerLowersItsHeight, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, + map[int64]tPBlocks{}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pool := tt.pool + err := pool.UpdatePeer(tt.args.id, tt.args.height) + assert.Equal(t, tt.errWanted, err) + assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks) + assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers) + assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight) + }) + } +} + +func TestBlockPoolRemovePeer(t *testing.T) { + testBcR := newTestBcR() + + type args struct { + peerID p2p.ID + err error + } + + tests := []struct { + name string + pool *BlockPool + args args + poolWanted *BlockPool + }{ + { + name: "attempt to delete non-existing peer", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), + args: args{"P99", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), + }, + { + name: "delete the only peer without blocks", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), + args: args{"P1", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + { + name: "delete the shortest of two peers 
without blocks", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}), + args: args{"P1", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}), + }, + { + name: "delete the tallest of two peers without blocks", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}), + args: args{"P2", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), + }, + { + name: "delete the only peer with block requests sent and blocks received", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, + map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}), + args: args{"P1", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + { + name: "delete the shortest of two peers with block requests sent and blocks received", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}}, + map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}), + args: args{"P1", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}), + }, + { + name: "delete the tallest of two peers with block requests sent and blocks received", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}}, + map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}), + args: args{"P1", nil}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.pool.RemovePeer(tt.args.peerID, tt.args.err) + assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) + }) + } +} + +func TestBlockPoolRemoveShortPeers(t *testing.T) { + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + poolWanted *BlockPool + }{ + { + name: "no short peers", + pool: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}), + poolWanted: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}), + }, + + { + name: "one short peer", + pool: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}), + poolWanted: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}), + }, + + { + name: "all short peers", + pool: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}), + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pool := tt.pool + pool.removeShortPeers() + assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) + }) + } +} + +func TestBlockPoolSendRequestBatch(t *testing.T) { + type testPeerResult struct { + id p2p.ID + numPendingBlockRequests int + } + + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + maxRequestsPerPeer int + expRequests map[int64]bool + expPeerResults []testPeerResult + expnumPendingBlockRequests int + }{ + { + name: "one peer - send up to maxRequestsPerPeer block requests", + pool: makeBlockPool(testBcR, 10, 
[]BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), + maxRequestsPerPeer: 2, + expRequests: map[int64]bool{10: true, 11: true}, + expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}}, + expnumPendingBlockRequests: 2, + }, + { + name: "n peers - send n*maxRequestsPerPeer block requests", + pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, map[int64]tPBlocks{}), + maxRequestsPerPeer: 2, + expRequests: map[int64]bool{10: true, 11: true}, + expPeerResults: []testPeerResult{ + {id: "P1", numPendingBlockRequests: 2}, + {id: "P2", numPendingBlockRequests: 2}}, + expnumPendingBlockRequests: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetPoolTestResults() + + var pool = tt.pool + maxRequestsPerPeer = tt.maxRequestsPerPeer + pool.MakeNextRequests(10) + assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers)) + + for _, tPeer := range tt.expPeerResults { + var peer = pool.peers[tPeer.id] + assert.NotNil(t, peer) + assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests) + } + assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers)) + + }) + } +} + +func TestBlockPoolAddBlock(t *testing.T) { + testBcR := newTestBcR() + txs := []types.Tx{types.Tx("foo"), types.Tx("bar")} + + type args struct { + peerID p2p.ID + block *types.Block + blockSize int + } + tests := []struct { + name string + pool *BlockPool + args args + poolWanted *BlockPool + errWanted error + }{ + {name: "block from unknown peer", + pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), + args: args{ + peerID: "P2", + block: types.MakeBlock(int64(10), txs, nil, nil), + blockSize: 100, + }, + poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), + errWanted: errBadDataFromPeer, + }, + {name: "unexpected block 11 from known peer - waiting for 10", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{10: {"P1", false}}), + args: args{ + peerID: "P1", + block: types.MakeBlock(int64(11), txs, nil, nil), + blockSize: 100, + }, + poolWanted: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{10: {"P1", false}}), + errWanted: errMissingBlock, + }, + {name: "unexpected block 10 from known peer - already have 10", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}), + args: args{ + peerID: "P1", + block: types.MakeBlock(int64(10), txs, nil, nil), + blockSize: 100, + }, + poolWanted: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}), + errWanted: errDuplicateBlock, + }, + {name: "unexpected block 10 from known peer P2 - expected 10 to come from P1", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{10: {"P1", false}}), + args: args{ + peerID: "P2", + block: types.MakeBlock(int64(10), txs, nil, nil), + blockSize: 100, + }, + poolWanted: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{10: {"P1", false}}), + errWanted: errBadDataFromPeer, + }, + {name: "expected block from known peer", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{10: {"P1", false}}), + args: args{ + peerID: "P1", + block: 
types.MakeBlock(int64(10), txs, nil, nil), + blockSize: 100, + }, + poolWanted: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{10: {"P1", true}}), + errWanted: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize) + assert.Equal(t, tt.errWanted, err) + assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) + }) + } +} + +func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) { + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + firstWanted int64 + secondWanted int64 + errWanted error + }{ + { + name: "both blocks missing", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}), + errWanted: errMissingBlock, + }, + { + name: "second block missing", + pool: makeBlockPool(testBcR, 15, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}), + firstWanted: 15, + errWanted: errMissingBlock, + }, + { + name: "first block missing", + pool: makeBlockPool(testBcR, 15, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}), + secondWanted: 16, + errWanted: errMissingBlock, + }, + { + name: "both blocks present", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}), + firstWanted: 10, + secondWanted: 11, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pool := tt.pool + gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers() + assert.Equal(t, tt.errWanted, err) + + if tt.firstWanted != 0 { + peer := pool.blocks[tt.firstWanted] + block := pool.peers[peer].blocks[tt.firstWanted] + assert.Equal(t, block, gotFirst.block, + "BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v", + tt.firstWanted, gotFirst.block.Height) + } + + if tt.secondWanted != 0 { + peer := pool.blocks[tt.secondWanted] + block := pool.peers[peer].blocks[tt.secondWanted] + assert.Equal(t, block, gotSecond.block, + "BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v", + tt.secondWanted, gotSecond.block.Height) + } + }) + } +} + +func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) { + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + poolWanted *BlockPool + }{ + { + name: "both blocks missing", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}), + poolWanted: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}), + }, + { + name: "second block missing", + pool: makeBlockPool(testBcR, 15, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}), + poolWanted: makeBlockPool(testBcR, 15, + []BpPeer{{ID: "P2", Height: 100}}, + map[int64]tPBlocks{18: {"P2", true}}), + }, + { + name: "first block missing", + pool: makeBlockPool(testBcR, 15, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}), + poolWanted: makeBlockPool(testBcR, 15, + []BpPeer{{ID: "P1", Height: 100}}, + map[int64]tPBlocks{18: {"P1", true}}), + }, + { + name: "both blocks 
present", + pool: makeBlockPool(testBcR, 10, + []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, + map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}), + poolWanted: makeBlockPool(testBcR, 10, + []BpPeer{}, + map[int64]tPBlocks{}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse) + assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) + }) + } +} + +func TestProcessedCurrentHeightBlock(t *testing.T) { + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + poolWanted *BlockPool + }{ + { + name: "one peer", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, + map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}), + poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}}, + map[int64]tPBlocks{101: {"P1", true}}), + }, + { + name: "multiple peers", + pool: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}}, + map[int64]tPBlocks{ + 100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false}, + 101: {"P2", true}, 103: {"P2", false}, + 102: {"P3", true}, 106: {"P3", true}}), + poolWanted: makeBlockPool(testBcR, 101, + []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}}, + map[int64]tPBlocks{ + 104: {"P1", true}, 105: {"P1", false}, + 101: {"P2", true}, 103: {"P2", false}, + 102: {"P3", true}, 106: {"P3", true}}), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.pool.ProcessedCurrentHeightBlock() + assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) + }) + } +} + +func TestRemovePeerAtCurrentHeight(t *testing.T) { + testBcR := newTestBcR() + + tests := []struct { + name string + pool *BlockPool + poolWanted *BlockPool + }{ + { + name: "one peer, remove peer for block at H", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, + map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}), + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + { + name: "one peer, remove peer for block at H+1", + pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, + map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}), + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + }, + { + name: "multiple peers, remove peer for block at H", + pool: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}}, + map[int64]tPBlocks{ + 100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false}, + 101: {"P2", true}, 103: {"P2", false}, + 102: {"P3", true}, 106: {"P3", true}}), + poolWanted: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}}, + map[int64]tPBlocks{ + 101: {"P2", true}, 103: {"P2", false}, + 102: {"P3", true}, 106: {"P3", true}}), + }, + { + name: "multiple peers, remove peer for block at H+1", + pool: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}}, + map[int64]tPBlocks{ + 100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false}, + 101: {"P2", false}, 103: {"P2", false}, + 102: {"P3", true}, 106: {"P3", true}}), + poolWanted: makeBlockPool(testBcR, 100, + []BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}}, + map[int64]tPBlocks{ + 100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false}, + 102: {"P3", true}, 106: {"P3", true}}), + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse) + assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool) + }) + } +} diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go new file mode 100644 index 000000000..fbb0fdf6a --- /dev/null +++ b/blockchain/v1/reactor.go @@ -0,0 +1,667 @@ +package v1 + +import ( + "errors" + "fmt" + "reflect" + "time" + + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/behaviour" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" +) + +const ( + // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) + BlockchainChannel = byte(0x40) + trySyncIntervalMS = 10 + trySendIntervalMS = 10 + + // ask for best height every 10s + statusUpdateIntervalSeconds = 10 + + // NOTE: keep up to date with bcBlockResponseMessage + bcBlockResponseMessagePrefixSize = 4 + bcBlockResponseMessageFieldKeySize = 1 + maxMsgSize = types.MaxBlockSizeBytes + + bcBlockResponseMessagePrefixSize + + bcBlockResponseMessageFieldKeySize +) + +var ( + // Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks + // have not been received. + maxRequestsPerPeer = 20 + // Maximum number of block requests for the reactor, pending or for which blocks have been received. + maxNumRequests = 64 +) + +type hotsyncReactor interface { + SwitchToHotSync(sm.State, int32) + SwitchToConsensusSync(sm.State) +} + +type consensusReactor interface { + // for when we switch from blockchain reactor and fast sync to + // the consensus machine + SwitchToConsensus(sm.State, int) +} + +// BlockchainReactor handles long-term catchup syncing. +type BlockchainReactor struct { + p2p.BaseReactor + + initialState sm.State // immutable + state sm.State + + blockExec *sm.BlockExecutor + store *store.BlockStore + + fastSync bool + + fsm *BcReactorFSM + blocksSynced int + + // Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine. + messagesForFSMCh chan bcReactorMessage + + // Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed + // to this channel to be processed in the context of the poolRoutine. + errorsForFSMCh chan bcReactorMessage + + // This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and + // the switch. + eventsFromFSMCh chan bcFsmMessage + + swReporter *behaviour.SwitchReporter + + hotSyncReactor bool + + hotsync bool +} + +// NewBlockchainReactor returns new reactor instance. 
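+//
+// A minimal wiring sketch (construction of state, blockExec, store and logger
+// is elided; the boolean flag values are illustrative):
+//
+//	bcR := NewBlockchainReactor(state, blockExec, store,
+//		true,  // fastSync
+//		false, // hotSyncReactor
+//		false, // hotSync
+//	)
+//	bcR.SetLogger(logger.With("module", "blockchain"))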
+func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
+	fastSync bool, hotSyncReactor, hotSync bool) *BlockchainReactor {
+
+	if state.LastBlockHeight != store.Height() {
+		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
+			store.Height()))
+	}
+
+	const capacity = 1000
+	eventsFromFSMCh := make(chan bcFsmMessage, capacity)
+	messagesForFSMCh := make(chan bcReactorMessage, capacity)
+	errorsForFSMCh := make(chan bcReactorMessage, capacity)
+
+	startHeight := store.Height() + 1
+	bcR := &BlockchainReactor{
+		initialState:     state,
+		state:            state,
+		blockExec:        blockExec,
+		fastSync:         fastSync,
+		store:            store,
+		messagesForFSMCh: messagesForFSMCh,
+		eventsFromFSMCh:  eventsFromFSMCh,
+		errorsForFSMCh:   errorsForFSMCh,
+		hotSyncReactor:   hotSyncReactor,
+		hotsync:          hotSync,
+	}
+	fsm := NewFSM(startHeight, bcR)
+	bcR.fsm = fsm
+	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
+
+	return bcR
+}
+
+// bcReactorMessage is used by the reactor to send messages to the FSM.
+type bcReactorMessage struct {
+	event bReactorEvent
+	data  bReactorEventData
+}
+
+type bFsmEvent uint
+
+const (
+	// message type events
+	peerErrorEv = iota + 1
+	syncFinishedEv
+)
+
+type bFsmEventData struct {
+	peerID p2p.ID
+	err    error
+}
+
+// bcFsmMessage is used by the FSM to send messages to the reactor.
+type bcFsmMessage struct {
+	event bFsmEvent
+	data  bFsmEventData
+}
+
+// SetLogger implements cmn.Service by setting the logger on the reactor and pool.
+func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
+	bcR.BaseService.Logger = l
+	bcR.fsm.SetLogger(l)
+}
+
+// OnStart implements cmn.Service.
+func (bcR *BlockchainReactor) OnStart() error {
+	bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)
+	if bcR.fastSync {
+		go bcR.poolRoutine()
+	}
+	return nil
+}
+
+// OnStop implements cmn.Service.
+func (bcR *BlockchainReactor) OnStop() {
+	_ = bcR.Stop()
+}
+
+// GetChannels implements Reactor.
+func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
+	return []*p2p.ChannelDescriptor{
+		{
+			ID:                  BlockchainChannel,
+			Priority:            10,
+			SendQueueCapacity:   2000,
+			RecvBufferCapacity:  50 * 4096,
+			RecvMessageCapacity: maxMsgSize,
+		},
+	}
+}
+
+// AddPeer implements Reactor by sending our state to the peer.
+func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
+	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
+	peer.Send(BlockchainChannel, msgBytes)
+	// It's OK if the send fails: we will try again later in poolRoutine.
+
+	// The peer is added to the pool once we receive the first
+	// bcStatusResponseMessage from it and call pool.updatePeer().
+}
+
+// sendBlockToPeer loads a block and sends it to the requesting peer.
+// If the block doesn't exist, a bcNoBlockResponseMessage is sent.
+// If all nodes are honest, no node should be requesting a block that doesn't exist.
+func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage,
+	src p2p.Peer) (queued bool) {
+
+	block := bcR.store.LoadBlock(msg.Height)
+	if block != nil {
+		msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
+		return src.TrySend(BlockchainChannel, msgBytes)
+	}
+
+	bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
+
+	msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
+	return src.TrySend(BlockchainChannel, msgBytes)
+}
+
+func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) {
+	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
+	return src.TrySend(BlockchainChannel, msgBytes)
+}
+
+// RemovePeer implements Reactor by removing the peer from the pool.
+func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+	msgData := bcReactorMessage{
+		event: peerRemoveEv,
+		data: bReactorEventData{
+			peerID: peer.ID(),
+			err:    errSwitchRemovesPeer,
+		},
+	}
+	bcR.errorsForFSMCh <- msgData
+}
+
+// Receive implements Reactor by handling 4 types of messages (see below).
+func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+	msg, err := decodeMsg(msgBytes)
+	if err != nil {
+		bcR.Logger.Error("error decoding message",
+			"src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+		return
+	}
+
+	if err = msg.ValidateBasic(); err != nil {
+		bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
+		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+		return
+	}
+
+	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
+
+	switch msg := msg.(type) {
+	case *bcBlockRequestMessage:
+		if queued := bcR.sendBlockToPeer(msg, src); !queued {
+			// Unfortunately not queued since the queue is full.
+			bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
+		}
+
+	case *bcStatusRequestMessage:
+		// Send peer our state.
+		if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
+			// Unfortunately not queued since the queue is full.
+			bcR.Logger.Error("Could not send status message to peer", "src", src)
+		}
+
+	case *bcBlockResponseMessage:
+		msgForFSM := bcReactorMessage{
+			event: blockResponseEv,
+			data: bReactorEventData{
+				peerID: src.ID(),
+				height: msg.Block.Height,
+				block:  msg.Block,
+				length: len(msgBytes),
+			},
+		}
+		bcR.Logger.Info("Received", "src", src, "height", msg.Block.Height)
+		bcR.messagesForFSMCh <- msgForFSM
+
+	case *bcStatusResponseMessage:
+		// Got a peer status. Unverified.
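+		// The reported height is taken at face value here; the FSM records it
+		// via the pool, and a peer that misreports is only penalized later,
+		// e.g. when it lowers its height or fails to deliver a requested block.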
+		msgForFSM := bcReactorMessage{
+			event: statusResponseEv,
+			data: bReactorEventData{
+				peerID: src.ID(),
+				height: msg.Height,
+				length: len(msgBytes),
+			},
+		}
+		bcR.messagesForFSMCh <- msgForFSM
+
+	default:
+		bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
+	}
+}
+
+// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel.
+func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
+
+	processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
+	doProcessBlockCh := make(chan struct{}, 1)
+
+	lastHundred := time.Now()
+	lastRate := 0.0
+
+ForLoop:
+	for {
+		select {
+		case <-stopProcessing:
+			bcR.Logger.Info("finishing block execution")
+			break ForLoop
+		case <-processReceivedBlockTicker.C: // try to execute blocks
+			select {
+			case doProcessBlockCh <- struct{}{}:
+			default:
+			}
+		case <-doProcessBlockCh:
+			for {
+				err := bcR.processBlock()
+				if err == errMissingBlock {
+					break
+				}
+				// Notify FSM of block processing result.
+				msgForFSM := bcReactorMessage{
+					event: processedBlockEv,
+					data: bReactorEventData{
+						err: err,
+					},
+				}
+				_ = bcR.fsm.Handle(&msgForFSM)
+
+				if err != nil {
+					break
+				}
+
+				bcR.blocksSynced++
+				if bcR.blocksSynced%100 == 0 {
+					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
+					height, maxPeerHeight := bcR.fsm.Status()
+					bcR.Logger.Info("Fast Sync Rate", "height", height,
+						"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
+					lastHundred = time.Now()
+				}
+			}
+		}
+	}
+}
+
+// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
+func (bcR *BlockchainReactor) poolRoutine() {
+
+	bcR.fsm.Start()
+
+	sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
+	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
+
+	stopProcessing := make(chan struct{}, 1)
+	go bcR.processBlocksRoutine(stopProcessing)
+
+ForLoop:
+	for {
+		select {
+
+		case <-sendBlockRequestTicker.C:
+			if !bcR.fsm.NeedsBlocks() {
+				continue
+			}
+			_ = bcR.fsm.Handle(&bcReactorMessage{
+				event: makeRequestsEv,
+				data: bReactorEventData{
+					maxNumRequests: maxNumRequests}})
+
+		case <-statusUpdateTicker.C:
+			// Ask for status updates.
+			go bcR.sendStatusRequest()
+
+		case msg := <-bcR.messagesForFSMCh:
+			// Sent from the Receive() routine when status (statusResponseEv) and
+			// block (blockResponseEv) response events are received.
+			_ = bcR.fsm.Handle(&msg)
+
+		case msg := <-bcR.errorsForFSMCh:
+			// Sent from the switch.RemovePeer() routine (peerRemoveEv) and the
+			// FSM state timer expiry routine (stateTimeoutEv).
+			_ = bcR.fsm.Handle(&msg)
+
+		case msg := <-bcR.eventsFromFSMCh:
+			switch msg.event {
+			case syncFinishedEv:
+				// Sent from the FSM when it enters the finished state.
+				stopProcessing <- struct{}{}
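+				// Stop the block-processing goroutine before leaving poolRoutine,
+				// so no further blocks are executed once the switch to consensus
+				// (or hot sync) is under way.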
+				break ForLoop
+			case peerErrorEv:
+				// Sent from the FSM when it detects a peer error.
+				bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
+				if msg.data.err == errNoPeerResponse {
+					// Sent from the peer timeout handler routine.
+					_ = bcR.fsm.Handle(&bcReactorMessage{
+						event: peerRemoveEv,
+						data: bReactorEventData{
+							peerID: msg.data.peerID,
+							err:    msg.data.err,
+						},
+					})
+				}
+				// For slow peers, and for errors due to blocks received from the
+				// wrong peer, the FSM has already removed the peer, so there is
+				// nothing more to do here.
+			default:
+				bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
+			}
+
+		case <-bcR.Quit():
+			break ForLoop
+		}
+	}
+}
+
+func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
+	peer := bcR.Switch.Peers().Get(peerID)
+	if peer != nil {
+		_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
+	}
+}
+
+func (bcR *BlockchainReactor) processBlock() error {
+
+	first, second, err := bcR.fsm.FirstTwoBlocks()
+	if err != nil {
+		// We need both to sync the first block.
+		return err
+	}
+
+	chainID := bcR.initialState.ChainID
+
+	firstParts := first.MakePartSet(types.BlockPartSizeBytes)
+	firstPartsHeader := firstParts.Header()
+	firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
+	// Finally, verify the first block using the second's commit.
+	// NOTE: we can probably make this more efficient, but note that calling
+	// first.Hash() doesn't verify the tx contents, so MakePartSet() is
+	// currently necessary.
+	err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit)
+	if err != nil {
+		bcR.Logger.Error("error during commit verification", "err", err,
+			"first", first.Height, "second", second.Height)
+		return errBlockVerificationFailure
+	}
+
+	bcR.store.SaveBlock(first, firstParts, second.LastCommit)
+
+	bcR.state, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
+	if err != nil {
+		panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
+	}
+
+	return nil
+}
+
+// Implements bcRNotifier.
+// sendStatusRequest broadcasts `BlockStore` height.
+func (bcR *BlockchainReactor) sendStatusRequest() {
+	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
+	bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
+}
+
+// Implements bcRNotifier.
+// sendBlockRequest sends a `BlockRequest` message for the given height to the given peer.
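+//
+// Illustrative call (the peer ID and height are placeholder values):
+//
+//	if err := bcR.sendBlockRequest("peer1", 42); err != nil {
+//		// errNilPeerForBlockRequest: the peer is gone from the switch;
+//		// errSendQueueFull: the request was not queued, retry later.
+//	}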
+func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error { + peer := bcR.Switch.Peers().Get(peerID) + if peer == nil { + return errNilPeerForBlockRequest + } + + msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{height}) + queued := peer.TrySend(BlockchainChannel, msgBytes) + if !queued { + return errSendQueueFull + } + return nil +} + +func (bcR *BlockchainReactor) SwitchToBlockchain(state *sm.State) { + bcR.Logger.Info("SwitchToBlockchain") + if state == nil { + panic("state is nil") + } + bcR.initialState = *state + bcR.state = *state + bcR.fastSync = true + bcR.store.SetHeight(state.LastBlockHeight) + bcR.Logger.Debug("SwitchToBlockchain", "lastheight", state.LastBlockHeight, "apphash", state.AppHash) + + startHeight := state.LastBlockHeight + 1 + fsm := NewFSM(startHeight, bcR) + fsm.SetLogger(bcR.Logger) + bcR.fsm = fsm + + go bcR.poolRoutine() +} + +// Implements bcRNotifier +func (bcR *BlockchainReactor) switchToConsensusOrHotSync() { + if bcR.hotSyncReactor && bcR.hotsync { + bcR.Logger.Info("Time to switch to hot sync reactor!") + hotR, ok := bcR.Switch.Reactor("HOT").(hotsyncReactor) + if ok { + hotR.SwitchToHotSync(bcR.state, int32(bcR.blocksSynced)) + } else { + // should only happen during testing + } + } else { + if bcR.hotSyncReactor { + bcR.Logger.Info("Time to switch hot sync reactor to consensus sync pattern!") + hotR, ok := bcR.Switch.Reactor("HOT").(hotsyncReactor) + if ok { + hotR.SwitchToConsensusSync(bcR.state) + } else { + // should only happen during testing + } + } + conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) + if ok { + conR.SwitchToConsensus(bcR.state, bcR.blocksSynced) + } + } + bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv} +} + +// Implements bcRNotifier +// Called by FSM and pool: +// - pool calls when it detects slow peer or when peer times out +// - FSM calls when: +// - adding a block (addBlock) fails +// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks +func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) { + bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err) + msgData := bcFsmMessage{ + event: peerErrorEv, + data: bFsmEventData{ + peerID: peerID, + err: err, + }, + } + bcR.eventsFromFSMCh <- msgData +} + +// Implements bcRNotifier +func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) { + if timer == nil { + panic("nil timer pointer parameter") + } + if *timer == nil { + *timer = time.AfterFunc(timeout, func() { + msg := bcReactorMessage{ + event: stateTimeoutEv, + data: bReactorEventData{ + stateName: name, + }, + } + bcR.errorsForFSMCh <- msg + }) + } else { + (*timer).Reset(timeout) + } +} + +//----------------------------------------------------------------------------- +// Messages + +// BlockchainMessage is a generic message for this reactor. +type BlockchainMessage interface { + ValidateBasic() error +} + +// RegisterBlockchainMessages registers the fast sync messages for amino encoding. 
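+// The codec passed in is typically the package-level cdc that this file uses
+// for MustMarshalBinaryBare/UnmarshalBinaryBare, so decodeMsg can decode any
+// of the five registered message types.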
+func RegisterBlockchainMessages(cdc *amino.Codec) { + cdc.RegisterInterface((*BlockchainMessage)(nil), nil) + cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil) + cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil) + cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil) + cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil) + cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil) +} + +func decodeMsg(bz []byte) (msg BlockchainMessage, err error) { + if len(bz) > maxMsgSize { + return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize) + } + err = cdc.UnmarshalBinaryBare(bz, &msg) + return +} + +//------------------------------------- + +type bcBlockRequestMessage struct { + Height int64 +} + +// ValidateBasic performs basic validation. +func (m *bcBlockRequestMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + return nil +} + +func (m *bcBlockRequestMessage) String() string { + return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) +} + +type bcNoBlockResponseMessage struct { + Height int64 +} + +// ValidateBasic performs basic validation. +func (m *bcNoBlockResponseMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + return nil +} + +func (m *bcNoBlockResponseMessage) String() string { + return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height) +} + +//------------------------------------- + +type bcBlockResponseMessage struct { + Block *types.Block +} + +// ValidateBasic performs basic validation. +func (m *bcBlockResponseMessage) ValidateBasic() error { + return m.Block.ValidateBasic() +} + +func (m *bcBlockResponseMessage) String() string { + return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) +} + +//------------------------------------- + +type bcStatusRequestMessage struct { + Height int64 +} + +// ValidateBasic performs basic validation. +func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + return nil +} + +func (m *bcStatusRequestMessage) String() string { + return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) +} + +//------------------------------------- + +type bcStatusResponseMessage struct { + Height int64 +} + +// ValidateBasic performs basic validation. 
+func (m *bcStatusResponseMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("negative Height")
+	}
+	return nil
+}
+
+func (m *bcStatusResponseMessage) String() string {
+	return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
+}
diff --git a/blockchain/v1/reactor_fsm.go b/blockchain/v1/reactor_fsm.go
new file mode 100644
index 000000000..71bb40090
--- /dev/null
+++ b/blockchain/v1/reactor_fsm.go
@@ -0,0 +1,451 @@
+package v1
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/types"
+)
+
+// bcReactorFSMState is a state of the Blockchain Reactor state machine.
+type bcReactorFSMState struct {
+	name string
+
+	// called to handle an event while in this state; returns the next state and an error
+	handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
+	// called when entering the state
+	enter func(fsm *BcReactorFSM)
+
+	// timeout to ensure the FSM is not stuck in a state forever;
+	// the timer is owned and run by the FSM instance
+	timeout time.Duration
+}
+
+func (s *bcReactorFSMState) String() string {
+	return s.name
+}
+
+// BcReactorFSM is the data structure for the Blockchain Reactor State Machine.
+type BcReactorFSM struct {
+	logger log.Logger
+	mtx    sync.Mutex
+
+	startTime time.Time
+
+	state      *bcReactorFSMState
+	stateTimer *time.Timer
+	pool       *BlockPool
+
+	// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, report errors, etc.
+	toBcR bcReactor
+}
+
+// NewFSM creates a new reactor FSM.
+func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
+	return &BcReactorFSM{
+		state:     unknown,
+		startTime: time.Now(),
+		pool:      NewBlockPool(height, toBcR),
+		toBcR:     toBcR,
+	}
+}
+
+// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
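+//
+// Event data travels to the FSM wrapped in a bcReactorMessage, e.g. a status
+// response from peer "P1" at height 10 (illustrative values):
+//
+//	fsm.Handle(&bcReactorMessage{
+//		event: statusResponseEv,
+//		data:  bReactorEventData{peerID: "P1", height: 10},
+//	})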
+type bReactorEventData struct { + peerID p2p.ID + err error // for peer error: timeout, slow; for processed block event if error occurred + height int64 // for status response; for processed block event + block *types.Block // for block response + stateName string // for state timeout events + length int // for block response event, length of received block, used to detect slow peers + maxNumRequests int // for request needed event, maximum number of pending requests +} + +// Blockchain Reactor Events (the input to the state machine) +type bReactorEvent uint + +const ( + // message type events + startFSMEv = iota + 1 + statusResponseEv + blockResponseEv + processedBlockEv + makeRequestsEv + stopFSMEv + + // other events + peerRemoveEv = iota + 256 + stateTimeoutEv +) + +func (msg *bcReactorMessage) String() string { + var dataStr string + + switch msg.event { + case startFSMEv: + dataStr = "" + case statusResponseEv: + dataStr = fmt.Sprintf("peer=%v height=%v", msg.data.peerID, msg.data.height) + case blockResponseEv: + dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v", + msg.data.peerID, msg.data.block.Height, msg.data.length) + case processedBlockEv: + dataStr = fmt.Sprintf("error=%v", msg.data.err) + case makeRequestsEv: + dataStr = "" + case stopFSMEv: + dataStr = "" + case peerRemoveEv: + dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID) + case stateTimeoutEv: + dataStr = fmt.Sprintf("state=%v", msg.data.stateName) + default: + dataStr = fmt.Sprintf("cannot interpret message data") + } + + return fmt.Sprintf("%v: %v", msg.event, dataStr) +} + +func (ev bReactorEvent) String() string { + switch ev { + case startFSMEv: + return "startFSMEv" + case statusResponseEv: + return "statusResponseEv" + case blockResponseEv: + return "blockResponseEv" + case processedBlockEv: + return "processedBlockEv" + case makeRequestsEv: + return "makeRequestsEv" + case stopFSMEv: + return "stopFSMEv" + case peerRemoveEv: + return "peerRemoveEv" + case stateTimeoutEv: + return "stateTimeoutEv" + default: + return "event unknown" + } + +} + +// states +var ( + unknown *bcReactorFSMState + waitForPeer *bcReactorFSMState + waitForBlock *bcReactorFSMState + finished *bcReactorFSMState +) + +// timeouts for state timers +const ( + waitForPeerTimeout = 3 * time.Second + waitForBlockAtCurrentHeightTimeout = 10 * time.Second +) + +// errors +var ( + // internal to the package + errNoErrorFinished = errors.New("fast sync is finished") + errInvalidEvent = errors.New("invalid event in current state") + errMissingBlock = errors.New("missing blocks") + errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch") + errSendQueueFull = errors.New("block request not made, send-queue is full") + errPeerTooShort = errors.New("peer height too low, old peer removed/ new peer not added") + errSwitchRemovesPeer = errors.New("switch is removing peer") + errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one") + errNoTallerPeer = errors.New("fast sync timed out on waiting for a peer taller than this node") + + // reported eventually to the switch + errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous") // handle return + errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights") // handle return + errNoPeerResponse = errors.New("fast sync timed out on peer block response") // xx + errBadDataFromPeer = errors.New("fast sync 
received block from wrong peer or block is bad") // xx + errDuplicateBlock = errors.New("fast sync received duplicate block from peer") + errBlockVerificationFailure = errors.New("fast sync block verification failure") // xx + errSlowPeer = errors.New("fast sync peer is not sending us data fast enough") // xx + +) + +func init() { + unknown = &bcReactorFSMState{ + name: "unknown", + handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) { + switch ev { + case startFSMEv: + // Broadcast Status message. Currently doesn't return non-nil error. + fsm.toBcR.sendStatusRequest() + return waitForPeer, nil + + case stopFSMEv: + return finished, errNoErrorFinished + + default: + return unknown, errInvalidEvent + } + }, + } + + waitForPeer = &bcReactorFSMState{ + name: "waitForPeer", + timeout: waitForPeerTimeout, + enter: func(fsm *BcReactorFSM) { + // Stop when leaving the state. + fsm.resetStateTimer() + }, + handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) { + switch ev { + case stateTimeoutEv: + if data.stateName != "waitForPeer" { + fsm.logger.Error("received a state timeout event for different state", + "state", data.stateName) + return waitForPeer, errTimeoutEventWrongState + } + // There was no statusResponse received from any peer. + // Should we send status request again? + return finished, errNoTallerPeer + + case statusResponseEv: + if err := fsm.pool.UpdatePeer(data.peerID, data.height); err != nil { + if fsm.pool.NumPeers() == 0 { + return waitForPeer, err + } + } + if fsm.stateTimer != nil { + fsm.stateTimer.Stop() + } + return waitForBlock, nil + + case stopFSMEv: + if fsm.stateTimer != nil { + fsm.stateTimer.Stop() + } + return finished, errNoErrorFinished + + default: + return waitForPeer, errInvalidEvent + } + }, + } + + waitForBlock = &bcReactorFSMState{ + name: "waitForBlock", + timeout: waitForBlockAtCurrentHeightTimeout, + enter: func(fsm *BcReactorFSM) { + // Stop when leaving the state. + fsm.resetStateTimer() + }, + handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) { + switch ev { + + case statusResponseEv: + err := fsm.pool.UpdatePeer(data.peerID, data.height) + if fsm.pool.NumPeers() == 0 { + return waitForPeer, err + } + if fsm.pool.ReachedMaxHeight() { + return finished, err + } + return waitForBlock, err + + case blockResponseEv: + fsm.logger.Debug("blockResponseEv", "H", data.block.Height) + err := fsm.pool.AddBlock(data.peerID, data.block, data.length) + if err != nil { + // A block was received that was unsolicited, from unexpected peer, or that we already have it. + // Ignore block, remove peer and send error to switch. + fsm.pool.RemovePeer(data.peerID, err) + fsm.toBcR.sendPeerError(err, data.peerID) + } + if fsm.pool.NumPeers() == 0 { + return waitForPeer, err + } + return waitForBlock, err + + case processedBlockEv: + if data.err != nil { + first, second, _ := fsm.pool.FirstTwoBlocksAndPeers() + fsm.logger.Error("error processing block", "err", data.err, + "first", first.block.Height, "second", second.block.Height) + fsm.logger.Error("send peer error for", "peer", first.peer.ID) + fsm.toBcR.sendPeerError(data.err, first.peer.ID) + fsm.logger.Error("send peer error for", "peer", second.peer.ID) + fsm.toBcR.sendPeerError(data.err, second.peer.ID) + // Remove the first two blocks. 
This will also remove the peers + fsm.pool.InvalidateFirstTwoBlocks(data.err) + } else { + fsm.pool.ProcessedCurrentHeightBlock() + // Since we advanced one block reset the state timer + fsm.resetStateTimer() + } + + // Both cases above may result in achieving maximum height. + if fsm.pool.ReachedMaxHeight() { + return finished, nil + } + + return waitForBlock, data.err + + case peerRemoveEv: + // This event is sent by the switch to remove disconnected and errored peers. + fsm.pool.RemovePeer(data.peerID, data.err) + if fsm.pool.NumPeers() == 0 { + return waitForPeer, nil + } + if fsm.pool.ReachedMaxHeight() { + return finished, nil + } + return waitForBlock, nil + + case makeRequestsEv: + fsm.makeNextRequests(data.maxNumRequests) + return waitForBlock, nil + + case stateTimeoutEv: + if data.stateName != "waitForBlock" { + fsm.logger.Error("received a state timeout event for different state", + "state", data.stateName) + return waitForBlock, errTimeoutEventWrongState + } + // We haven't received the block at current height or height+1. Remove peer. + fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights) + fsm.resetStateTimer() + if fsm.pool.NumPeers() == 0 { + return waitForPeer, errNoPeerResponseForCurrentHeights + } + if fsm.pool.ReachedMaxHeight() { + return finished, nil + } + return waitForBlock, errNoPeerResponseForCurrentHeights + + case stopFSMEv: + if fsm.stateTimer != nil { + fsm.stateTimer.Stop() + } + return finished, errNoErrorFinished + + default: + return waitForBlock, errInvalidEvent + } + }, + } + + finished = &bcReactorFSMState{ + name: "finished", + enter: func(fsm *BcReactorFSM) { + fsm.logger.Info("Time to switch to consensus or hotsync reactor!", "height", fsm.pool.Height) + + fsm.toBcR.switchToConsensusOrHotSync() + fsm.cleanup() + }, + handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) { + return finished, nil + }, + } +} + +// Interface used by FSM for sending Block and Status requests, +// informing of peer errors and state timeouts +// Implemented by BlockchainReactor and tests +type bcReactor interface { + sendStatusRequest() + sendBlockRequest(peerID p2p.ID, height int64) error + sendPeerError(err error, peerID p2p.ID) + resetStateTimer(name string, timer **time.Timer, timeout time.Duration) + switchToConsensusOrHotSync() +} + +// SetLogger sets the FSM logger. +func (fsm *BcReactorFSM) SetLogger(l log.Logger) { + fsm.logger = l + fsm.pool.SetLogger(l) +} + +// Start starts the FSM. +func (fsm *BcReactorFSM) Start() { + _ = fsm.Handle(&bcReactorMessage{event: startFSMEv}) +} + +// Handle processes messages and events sent to the FSM. +func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error { + fsm.mtx.Lock() + defer fsm.mtx.Unlock() + fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state) + + if fsm.state == nil { + fsm.state = unknown + } + next, err := fsm.state.handle(fsm, msg.event, msg.data) + if err != nil { + fsm.logger.Error("FSM event handler returned", "err", err, + "state", fsm.state, "event", msg.event) + } + + oldState := fsm.state.name + fsm.transition(next) + if oldState != fsm.state.name { + fsm.logger.Info("FSM changed state", "new_state", fsm.state) + } + return err +} + +func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) { + if next == nil { + return + } + if fsm.state != next { + fsm.state = next + if next.enter != nil { + next.enter(fsm) + } + } +} + +// Called when entering an FSM state in order to detect lack of progress in the state machine. 
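+// The timer is armed with the entered state's own timeout (waitForPeerTimeout or
+// waitForBlockAtCurrentHeightTimeout); when it expires, the reactor sends a
+// stateTimeoutEv back through errorsForFSMCh.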
+// Note the use of the 'bcr' interface to facilitate testing without timer expiring. +func (fsm *BcReactorFSM) resetStateTimer() { + fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout) +} + +func (fsm *BcReactorFSM) isCaughtUp() bool { + return fsm.state == finished +} + +func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) { + fsm.pool.MakeNextRequests(maxNumRequests) +} + +func (fsm *BcReactorFSM) cleanup() { + fsm.pool.Cleanup() +} + +// NeedsBlocks checks if more block requests are required. +func (fsm *BcReactorFSM) NeedsBlocks() bool { + fsm.mtx.Lock() + defer fsm.mtx.Unlock() + return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks() +} + +// FirstTwoBlocks returns the two blocks at pool height and height+1 +func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) { + fsm.mtx.Lock() + defer fsm.mtx.Unlock() + firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers() + if err == nil { + first = firstBP.block + second = secondBP.block + } + return +} + +// Status returns the pool's height and the maximum peer height. +func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) { + fsm.mtx.Lock() + defer fsm.mtx.Unlock() + return fsm.pool.Height, fsm.pool.MaxPeerHeight +} diff --git a/blockchain/v1/reactor_fsm_test.go b/blockchain/v1/reactor_fsm_test.go new file mode 100644 index 000000000..7caeedd84 --- /dev/null +++ b/blockchain/v1/reactor_fsm_test.go @@ -0,0 +1,938 @@ +package v1 + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/types" +) + +type lastBlockRequestT struct { + peerID p2p.ID + height int64 +} + +type lastPeerErrorT struct { + peerID p2p.ID + err error +} + +// reactor for FSM testing +type testReactor struct { + logger log.Logger + fsm *BcReactorFSM + numStatusRequests int + numBlockRequests int + lastBlockRequest lastBlockRequestT + lastPeerError lastPeerErrorT + stateTimerStarts map[string]int +} + +func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error { + return fsm.Handle(&bcReactorMessage{event: ev, data: data}) +} + +type fsmStepTestValues struct { + currentState string + event bReactorEvent + data bReactorEventData + + wantErr error + wantState string + wantStatusReqSent bool + wantReqIncreased bool + wantNewBlocks []int64 + wantRemovedPeers []p2p.ID +} + +// --------------------------------------------------------------------------- +// helper test function for different FSM events, state and expected behavior +func sStopFSMEv(current, expected string) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: stopFSMEv, + wantState: expected, + wantErr: errNoErrorFinished} +} + +func sUnknownFSMEv(current string) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: 1234, + wantState: current, + wantErr: errInvalidEvent} +} + +func sStartFSMEv() fsmStepTestValues { + return fsmStepTestValues{ + currentState: "unknown", + event: startFSMEv, + wantState: "waitForPeer", + wantStatusReqSent: true} +} + +func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: stateTimeoutEv, + data: bReactorEventData{ + stateName: timedoutState, + }, + wantState: expected, + wantErr: wantErr, + } +} + +func 
sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: processedBlockEv, + data: bReactorEventData{ + err: reactorError, + }, + wantState: expected, + wantErr: reactorError, + } +} + +func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: statusResponseEv, + data: bReactorEventData{peerID: peerID, height: height}, + wantState: expected, + wantErr: err} +} + +func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: makeRequestsEv, + data: bReactorEventData{maxNumRequests: maxPendingRequests}, + wantState: expected, + wantReqIncreased: true, + } +} + +func sMakeRequestsEvErrored(current, expected string, + maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: makeRequestsEv, + data: bReactorEventData{maxNumRequests: maxPendingRequests}, + wantState: expected, + wantErr: err, + wantRemovedPeers: peersRemoved, + wantReqIncreased: true, + } +} + +func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues { + txs := []types.Tx{types.Tx("foo"), types.Tx("bar")} + return fsmStepTestValues{ + currentState: current, + event: blockResponseEv, + data: bReactorEventData{ + peerID: peerID, + height: height, + block: types.MakeBlock(int64(height), txs, nil, nil), + length: 100}, + wantState: expected, + wantNewBlocks: append(prevBlocks, height), + } +} + +func sBlockRespEvErrored(current, expected string, + peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues { + txs := []types.Tx{types.Tx("foo"), types.Tx("bar")} + + return fsmStepTestValues{ + currentState: current, + event: blockResponseEv, + data: bReactorEventData{ + peerID: peerID, + height: height, + block: types.MakeBlock(int64(height), txs, nil, nil), + length: 100}, + wantState: expected, + wantErr: wantErr, + wantRemovedPeers: peersRemoved, + wantNewBlocks: prevBlocks, + } +} + +func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: peerRemoveEv, + data: bReactorEventData{ + peerID: peerID, + err: err, + }, + wantState: expected, + wantRemovedPeers: peersRemoved, + } +} + +// -------------------------------------------- + +func newTestReactor(height int64) *testReactor { + testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)} + testBcR.fsm = NewFSM(height, testBcR) + testBcR.fsm.SetLogger(testBcR.logger) + return testBcR +} + +func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) { + // There is currently no good way to know to which peer a block request was sent. + // So in some cases where it does not matter, before we simulate a block response + // we cheat and look where it is expected from. 
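+	// Whether this fix-up is applied is controlled by the matchRespToReq
+	// argument of executeFSMTests below.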
+ if step.event == blockResponseEv { + height := step.data.height + peerID, ok := testBcR.fsm.pool.blocks[height] + if ok { + step.data.peerID = peerID + } + } +} + +type testFields struct { + name string + startingHeight int64 + maxRequestsPerPeer int + maxPendingRequests int + steps []fsmStepTestValues +} + +func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test reactor + testBcR := newTestReactor(tt.startingHeight) + + if tt.maxRequestsPerPeer != 0 { + maxRequestsPerPeer = tt.maxRequestsPerPeer + } + + for _, step := range tt.steps { + assert.Equal(t, step.currentState, testBcR.fsm.state.name) + + var heightBefore int64 + if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure { + heightBefore = testBcR.fsm.pool.Height + } + oldNumStatusRequests := testBcR.numStatusRequests + oldNumBlockRequests := testBcR.numBlockRequests + if matchRespToReq { + fixBlockResponseEvStep(&step, testBcR) + } + + fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data) + assert.Equal(t, step.wantErr, fsmErr) + + if step.wantStatusReqSent { + assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests) + } else { + assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests) + } + + if step.wantReqIncreased { + assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests) + } else { + assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests) + } + + for _, height := range step.wantNewBlocks { + _, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height) + assert.Nil(t, err) + } + if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure { + heightAfter := testBcR.fsm.pool.Height + assert.Equal(t, heightBefore, heightAfter) + firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height) + secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1) + assert.NotNil(t, err1) + assert.NotNil(t, err2) + assert.Nil(t, firstAfter) + assert.Nil(t, secondAfter) + } + + assert.Equal(t, step.wantState, testBcR.fsm.state.name) + + if step.wantState == "finished" { + assert.True(t, testBcR.fsm.isCaughtUp()) + } + } + }) + } +} + +func TestFSMBasic(t *testing.T) { + tests := []testFields{ + { + name: "one block, one peer - TS2", + startingHeight: 1, + maxRequestsPerPeer: 2, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}), + sProcessedBlockEv("waitForBlock", "finished", nil), + }, + }, + { + name: "multi block, multi peer - TS2", + startingHeight: 1, + maxRequestsPerPeer: 2, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil), + sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}), + + sProcessedBlockEv("waitForBlock", "waitForBlock", nil), + sProcessedBlockEv("waitForBlock", "waitForBlock", nil), + sProcessedBlockEv("waitForBlock", "finished", nil), 
+ }, + }, + } + + executeFSMTests(t, tests, true) +} + +func TestFSMBlockVerificationFailure(t *testing.T) { + tests := []testFields{ + { + name: "block verification failure - TS2 variant", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + + // add P1 and get blocks 1-3 from it + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}), + + // add P2 + sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil), + + // process block failure, should remove P1 and all blocks + sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure), + + // get blocks 1-3 from P2 + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}), + + // finish after processing blocks 1 and 2 + sProcessedBlockEv("waitForBlock", "waitForBlock", nil), + sProcessedBlockEv("waitForBlock", "finished", nil), + }, + }, + } + + executeFSMTests(t, tests, false) +} + +func TestFSMBadBlockFromPeer(t *testing.T) { + tests := []testFields{ + { + name: "block we haven't asked for", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + // add P1 and ask for blocks 1-3 + sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + + // blockResponseEv for height 100 should cause an error + sBlockRespEvErrored("waitForBlock", "waitForPeer", + "P1", 100, []int64{}, errMissingBlock, []p2p.ID{}), + }, + }, + { + name: "block we already have", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + // add P1 and get block 1 + sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sBlockRespEv("waitForBlock", "waitForBlock", + "P1", 1, []int64{}), + + // Get block 1 again. 
Since the peer is removed together with block 1,
+				// the blocks present in the pool should be {}
+				sBlockRespEvErrored("waitForBlock", "waitForPeer",
+					"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
+			},
+		},
+		{
+			name:               "block from unknown peer",
+			startingHeight:     1,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
+
+				// get block 1 from unknown peer P2
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+				sBlockRespEvErrored("waitForBlock", "waitForBlock",
+					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
+			},
+		},
+		{
+			name:               "block from wrong peer",
+			startingHeight:     1,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1, make requests for blocks 1-3 to P1
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+
+				// add P2
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
+
+				// receive block 1 from P2
+				sBlockRespEvErrored("waitForBlock", "waitForBlock",
+					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
+			},
+		},
+	}
+
+	executeFSMTests(t, tests, false)
+}
+
+func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
+	tests := []testFields{
+		{
+			name:               "block at current height undelivered - TS5",
+			startingHeight:     1,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1, get blocks 1 and 2, process block 1
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+				sBlockRespEv("waitForBlock", "waitForBlock",
+					"P1", 1, []int64{}),
+				sBlockRespEv("waitForBlock", "waitForBlock",
+					"P1", 2, []int64{1}),
+				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
+
+				// add P2
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
+
+				// timeout on block 3, P1 should be removed
+				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
+
+				// make requests and finish by receiving blocks 2 and 3 from P2
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
+				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
+				sProcessedBlockEv("waitForBlock", "finished", nil),
+			},
+		},
+		{
+			name:               "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
+			startingHeight:     1,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1, request blocks 1-3 from P1
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+
+				// add P2 (tallest)
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+
+				// receive blocks 1-3 from P1
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),
+
+				// process blocks at heights 1 and 2
+				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
+				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
+
+				// timeout on block at height 4
+				sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
+			},
+		},
+	}
+
+	executeFSMTests(t, tests, true)
+}
+
+func TestFSMPeerRelatedEvents(t *testing.T) {
+	tests := []testFields{
+		{
+			name:           "peer remove event with no blocks",
+			startingHeight: 1,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1, P2, P3
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
+				sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),
+
+				// switch removes P2
+				sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
+			},
+		},
+		{
+			name:           "only peer removed while in waitForBlock state",
+			startingHeight: 100,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
+
+				// switch removes P1
+				sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
+			},
+		},
+		{
+			name:               "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4",
+			startingHeight:     100,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1 and make requests
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+				// add P2
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
+
+				// get blocks 100 and 101 from P1 and process block at height 100
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
+				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
+
+				// switch removes P2, the tallest peer; the node is at maxPeerHeight and should be finished
+				sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
+			},
+		},
+		{
+			name:               "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
+			startingHeight:     100,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1 and make requests
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+
+				// add P2
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
+
+				// get blocks 100 and 101 from P1
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
+				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
+
+				// process the block at height 100
+				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
+
+				// P2 lowers its height, the node is already at maxPeerHeight and finishes
+				sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
+			},
+		},
+		{
+			name:           "new short peer while in waitForPeer state",
+			startingHeight: 100,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
+			},
+		},
+		{
+			name:           "new short peer while in waitForBlock state",
+			startingHeight: 100,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
+				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
+			},
+		},
+		{
+			name:           "only peer updated with low height while in waitForBlock state",
+			startingHeight: 100,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
+				sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
+			},
+		},
+		{
+			name:               "peer does not exist in the switch",
+			startingHeight:     9999999,
+			maxRequestsPerPeer: 3,
+			steps: []fsmStepTestValues{
+				sStartFSMEv(),
+				// add P1
+				sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
+				// send request for block 9999999
+				// Note: For this block request the "switch missing the peer" error is simulated,
+				// see
implementation of bcReactor interface, sendBlockRequest(), in this file. + sMakeRequestsEvErrored("waitForBlock", "waitForBlock", + maxNumRequests, nil, []p2p.ID{"P1"}), + }, + }, + } + + executeFSMTests(t, tests, true) +} + +func TestFSMStopFSM(t *testing.T) { + tests := []testFields{ + { + name: "stopFSMEv in unknown", + steps: []fsmStepTestValues{ + sStopFSMEv("unknown", "finished"), + }, + }, + { + name: "stopFSMEv in waitForPeer", + startingHeight: 1, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStopFSMEv("waitForPeer", "finished"), + }, + }, + { + name: "stopFSMEv in waitForBlock", + startingHeight: 1, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sStopFSMEv("waitForBlock", "finished"), + }, + }, + } + + executeFSMTests(t, tests, false) +} + +func TestFSMUnknownElements(t *testing.T) { + tests := []testFields{ + { + name: "unknown event for state unknown", + steps: []fsmStepTestValues{ + sUnknownFSMEv("unknown"), + }, + }, + { + name: "unknown event for state waitForPeer", + steps: []fsmStepTestValues{ + sStartFSMEv(), + sUnknownFSMEv("waitForPeer"), + }, + }, + { + name: "unknown event for state waitForBlock", + startingHeight: 1, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sUnknownFSMEv("waitForBlock"), + }, + }, + } + + executeFSMTests(t, tests, false) +} + +func TestFSMPeerStateTimeoutEvent(t *testing.T) { + tests := []testFields{ + { + name: "timeout event for state waitForPeer while in state waitForPeer - TS1", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer), + }, + }, + { + name: "timeout event for state waitForPeer while in a state != waitForPeer", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState), + }, + }, + { + name: "timeout event for state waitForBlock while in state waitForBlock ", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights), + }, + }, + { + name: "timeout event for state waitForBlock while in a state != waitForBlock", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState), + }, + }, + { + name: "timeout event for state waitForBlock with multiple peers", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil), + sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights), + }, + }, + } + + executeFSMTests(t, tests, false) +} + +func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool, + maxRequestsPerPeer int, maxPendingRequests int) testFields { + + // Generate 
numPeers peers; the first is given height numBlocks, the rest get either random heights or numBlocks, per the randomPeerHeights flag.
+	peerHeights := make([]int64, numPeers)
+	for i := 0; i < numPeers; i++ {
+		if i == 0 {
+			peerHeights[0] = numBlocks
+			continue
+		}
+		if randomPeerHeights {
+			peerHeights[i] = int64(cmn.MaxInt(cmn.RandIntn(int(numBlocks)), int(startingHeight)+1))
+		} else {
+			peerHeights[i] = numBlocks
+		}
+	}
+
+	// Approximate the slice capacity to save time for appends.
+	testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))
+
+	testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxPendingRequests",
+		numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)
+
+	// Add startFSMEv step.
+	testSteps = append(testSteps, sStartFSMEv())
+
+	// For each peer, add statusResponseEv step.
+	for i := 0; i < numPeers; i++ {
+		peerName := fmt.Sprintf("P%d", i)
+		if i == 0 {
+			testSteps = append(
+				testSteps,
+				sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
+		} else {
+			testSteps = append(testSteps,
+				sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
+		}
+	}
+
+	height := startingHeight
+	numBlocksReceived := 0
+	prevBlocks := make([]int64, 0, maxPendingRequests)
+
+forLoop:
+	for i := 0; i < int(numBlocks); i++ {
+
+		// Add the makeRequestEv step periodically.
+		if i%int(maxRequestsPerPeer) == 0 {
+			testSteps = append(
+				testSteps,
+				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
+			)
+		}
+
+		// Add the blockRespEv step.
+		testSteps = append(
+			testSteps,
+			sBlockRespEv("waitForBlock", "waitForBlock",
+				"P0", height, prevBlocks))
+		prevBlocks = append(prevBlocks, height)
+		height++
+		numBlocksReceived++
+
+		// Add the processedBlockEv step periodically.
+		if numBlocksReceived >= int(maxRequestsPerPeer) || height >= numBlocks {
+			for j := int(height) - numBlocksReceived; j < int(height); j++ {
+				if j >= int(numBlocks) {
+					// This is the last block to be processed; we should end up in the "finished" state.
+					testSteps = append(
+						testSteps,
+						sProcessedBlockEv("waitForBlock", "finished", nil))
+					break forLoop
+				}
+				testSteps = append(
+					testSteps,
+					sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
+			}
+			numBlocksReceived = 0
+			prevBlocks = make([]int64, 0, maxPendingRequests)
+		}
+	}
+
+	return testFields{
+		name:               testName,
+		startingHeight:     startingHeight,
+		maxRequestsPerPeer: maxRequestsPerPeer,
+		maxPendingRequests: maxPendingRequests,
+		steps:              testSteps,
+	}
+}
+
+const (
+	maxStartingHeightTest       = 100
+	maxRequestsPerPeerTest      = 20
+	maxTotalPendingRequestsTest = 600
+	maxNumPeersTest             = 1000
+	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
+)
+
+func makeCorrectTransitionSequenceWithRandomParameters() testFields {
+	// Generate a starting height for fast sync.
+	startingHeight := int64(cmn.RandIntn(maxStartingHeightTest) + 1)
+
+	// Generate the number of requests per peer.
+	maxRequestsPerPeer := cmn.RandIntn(maxRequestsPerPeerTest) + 1
+
+	// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
+	maxPendingRequests := cmn.RandIntn(maxTotalPendingRequestsTest-int(maxRequestsPerPeer)) + maxRequestsPerPeer
+
+	// Generate the number of blocks to be synced.
+	numBlocks := int64(cmn.RandIntn(maxNumBlocksInChainTest)) + startingHeight
+
+	// Generate a number of peers.
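+	// Note: this assumes cmn.RandIntn behaves like math/rand.Intn and returns
+	// a value in [0, n), so the "+1" below guarantees at least one peer.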
+ numPeers := cmn.RandIntn(maxNumPeersTest) + 1 + + return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests) +} + +func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool { + if step.event == processedBlockEv { + _, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height) + if err == errMissingBlock { + return false + } + _, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1) + if err == errMissingBlock { + return false + } + } + return true +} + +func TestFSMCorrectTransitionSequences(t *testing.T) { + + tests := []testFields{ + makeCorrectTransitionSequence(1, 100, 10, true, 10, 40), + makeCorrectTransitionSequenceWithRandomParameters(), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test reactor + testBcR := newTestReactor(tt.startingHeight) + + if tt.maxRequestsPerPeer != 0 { + maxRequestsPerPeer = tt.maxRequestsPerPeer + } + + for _, step := range tt.steps { + assert.Equal(t, step.currentState, testBcR.fsm.state.name) + + oldNumStatusRequests := testBcR.numStatusRequests + fixBlockResponseEvStep(&step, testBcR) + if !shouldApplyProcessedBlockEvStep(&step, testBcR) { + continue + } + + fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data) + assert.Equal(t, step.wantErr, fsmErr) + + if step.wantStatusReqSent { + assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests) + } else { + assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests) + } + + assert.Equal(t, step.wantState, testBcR.fsm.state.name) + if step.wantState == "finished" { + assert.True(t, testBcR.fsm.isCaughtUp()) + } + } + + }) + } +} + +// ---------------------------------------- +// implements the bcRNotifier +func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) { + testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err) + testR.lastPeerError.peerID = peerID + testR.lastPeerError.err = err +} + +func (testR *testReactor) sendStatusRequest() { + testR.logger.Info("Reactor received sendStatusRequest call from FSM") + testR.numStatusRequests++ +} + +func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error { + testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height) + testR.numBlockRequests++ + testR.lastBlockRequest.peerID = peerID + testR.lastBlockRequest.height = height + if height == 9999999 { + // simulate switch does not have peer + return errNilPeerForBlockRequest + } + return nil +} + +func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) { + testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout) + if _, ok := testR.stateTimerStarts[name]; !ok { + testR.stateTimerStarts[name] = 1 + } else { + testR.stateTimerStarts[name]++ + } +} + +func (testR *testReactor) switchToConsensusOrHotSync() { +} + +// ---------------------------------------- diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go new file mode 100644 index 000000000..a7e47b4c4 --- /dev/null +++ b/blockchain/v1/reactor_test.go @@ -0,0 +1,413 @@ +package v1 + +import ( + "fmt" + "os" + "sort" + "sync" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + abci "github.com/tendermint/tendermint/abci/types" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + 
"github.com/tendermint/tendermint/mock" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" + dbm "github.com/tendermint/tendermint/libs/db" +) + +var config *cfg.Config + +func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) { + validators := make([]types.GenesisValidator, numValidators) + privValidators := make([]types.PrivValidator, numValidators) + for i := 0; i < numValidators; i++ { + val, privVal := types.RandValidator(randPower, minPower) + validators[i] = types.GenesisValidator{ + PubKey: val.PubKey, + Power: val.VotingPower, + } + privValidators[i] = privVal + } + sort.Sort(types.PrivValidatorsByAddress(privValidators)) + + return &types.GenesisDoc{ + GenesisTime: tmtime.Now(), + ChainID: config.ChainID(), + Validators: validators, + }, privValidators +} + +func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote { + addr := privVal.GetPubKey().Address() + idx, _ := valset.GetByAddress(addr) + vote := &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: idx, + Height: header.Height, + Round: 1, + Timestamp: tmtime.Now(), + Type: types.PrecommitType, + BlockID: blockID, + } + + _ = privVal.SignVote(header.ChainID, vote) + + return vote +} + +type BlockchainReactorPair struct { + bcR *BlockchainReactor + conR *consensusReactorTest +} + +func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) *BlockchainReactor { + if len(privVals) != 1 { + panic("only support one validator") + } + + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc) + err := proxyApp.Start() + if err != nil { + panic(errors.Wrap(err, "error start app")) + } + + blockDB := dbm.NewMemDB() + stateDB := dbm.NewMemDB() + blockStore := store.NewBlockStore(blockDB) + + state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + if err != nil { + panic(errors.Wrap(err, "error constructing state from genesis file")) + } + + // Make the BlockchainReactor itself. + // NOTE we have to create and commit the blocks first because + // pool.height is determined from the store. 
+ fastSync := true + db := dbm.NewMemDB() + blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.MockEvidencePool{}, true) + sm.SaveState(db, state) + + // let's add some blocks in + for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { + lastCommit := types.NewCommit(types.BlockID{}, nil) + if blockHeight > 1 { + lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) + lastBlock := blockStore.LoadBlock(blockHeight - 1) + + vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig() + lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote}) + } + + thisBlock := makeBlock(blockHeight, state, lastCommit) + + thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) + blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} + + state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + if err != nil { + panic(errors.Wrap(err, "error apply block")) + } + + blockStore.SaveBlock(thisBlock, thisParts, lastCommit) + } + + bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync, config.HotSyncReactor, config.HotSync) + bcReactor.SetLogger(logger.With("module", "blockchain")) + + return bcReactor +} + +func newBlockchainReactorPair(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair { + + consensusReactor := &consensusReactorTest{} + consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor) + + return BlockchainReactorPair{ + newBlockchainReactor(logger, genDoc, privVals, maxBlockHeight), + consensusReactor} +} + +type consensusReactorTest struct { + p2p.BaseReactor // BaseService + p2p.Switch + switchedToConsensus bool + mtx sync.Mutex +} + +func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced int) { + conR.mtx.Lock() + defer conR.mtx.Unlock() + conR.switchedToConsensus = true +} + +func TestFastSyncNoBlockResponse(t *testing.T) { + + config = cfg.ResetTestRoot("blockchain_new_reactor_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc(1, false, 30) + + maxBlockHeight := int64(65) + + reactorPairs := make([]BlockchainReactorPair, 2) + + logger := log.TestingLogger() + reactorPairs[0] = newBlockchainReactorPair(logger, genDoc, privVals, maxBlockHeight) + reactorPairs[1] = newBlockchainReactorPair(logger, genDoc, privVals, 0) + + p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR) + s.AddReactor("CONSENSUS", reactorPairs[i].conR) + moduleName := fmt.Sprintf("blockchain-%v", i) + reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName)) + + return s + + }, p2p.Connect2Switches) + + defer func() { + for _, r := range reactorPairs { + _ = r.bcR.Stop() + _ = r.conR.Stop() + } + }() + + tests := []struct { + height int64 + existent bool + }{ + {maxBlockHeight + 2, false}, + {10, true}, + {1, true}, + {maxBlockHeight + 100, false}, + } + + for { + time.Sleep(10 * time.Millisecond) + reactorPairs[1].conR.mtx.Lock() + if reactorPairs[1].conR.switchedToConsensus { + reactorPairs[1].conR.mtx.Unlock() + break + } + reactorPairs[1].conR.mtx.Unlock() + } + + assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height()) + + for _, tt := range tests { + block := reactorPairs[1].bcR.store.LoadBlock(tt.height) + if tt.existent { + assert.True(t, block != nil) + } else { + 
assert.True(t, block == nil)
+		}
+	}
+}
+
+// NOTE: This is hard to test without an easy way to add a test peer to the
+// switch, or without significant refactoring of the module. Alternatively we
+// could actually dial a TCP connection, but that seems extreme.
+func TestFastSyncBadBlockStopsPeer(t *testing.T) {
+	numNodes := 4
+	maxBlockHeight := int64(148)
+
+	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
+	genDoc, privVals := randGenesisDoc(1, false, 30)
+
+	otherChain := newBlockchainReactorPair(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
+	defer func() {
+		_ = otherChain.bcR.Stop()
+		_ = otherChain.conR.Stop()
+	}()
+
+	reactorPairs := make([]BlockchainReactorPair, numNodes)
+	logger := make([]log.Logger, numNodes)
+
+	for i := 0; i < numNodes; i++ {
+		logger[i] = log.TestingLogger()
+		height := int64(0)
+		if i == 0 {
+			height = maxBlockHeight
+		}
+		reactorPairs[i] = newBlockchainReactorPair(logger[i], genDoc, privVals, height)
+	}
+
+	switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
+		reactorPairs[i].conR.mtx.Lock()
+		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
+		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
+		moduleName := fmt.Sprintf("blockchain-%v", i)
+		reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
+		reactorPairs[i].conR.mtx.Unlock()
+		return s
+
+	}, p2p.Connect2Switches)
+
+	defer func() {
+		for _, r := range reactorPairs {
+			_ = r.bcR.Stop()
+			_ = r.conR.Stop()
+		}
+	}()
+
+outerFor:
+	for {
+		time.Sleep(10 * time.Millisecond)
+		for i := 0; i < numNodes; i++ {
+			reactorPairs[i].conR.mtx.Lock()
+			if !reactorPairs[i].conR.switchedToConsensus {
+				reactorPairs[i].conR.mtx.Unlock()
+				continue outerFor
+			}
+			reactorPairs[i].conR.mtx.Unlock()
+		}
+		break
+	}
+
+	// at this point, reactors[0-3] are all caught up to the newest height
+	assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())
+
+	// mark the last reactorPair as an invalid peer by giving it the other chain's store
+	reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store
+
+	lastLogger := log.TestingLogger()
+	lastReactorPair := newBlockchainReactorPair(lastLogger, genDoc, privVals, 0)
+	reactorPairs = append(reactorPairs, lastReactorPair)
+
+	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
+		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
+		s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
+		moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
+		reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
+		return s
+
+	}, p2p.Connect2Switches)...)
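+	// The loop below dials the new node from every existing switch, so all
+	// peers, including the one with the bad store, end up serving it blocks.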
+ + for i := 0; i < len(reactorPairs)-1; i++ { + p2p.Connect2Switches(switches, i, len(reactorPairs)-1) + } + + for { + time.Sleep(1 * time.Second) + lastReactorPair.conR.mtx.Lock() + if lastReactorPair.conR.switchedToConsensus { + lastReactorPair.conR.mtx.Unlock() + break + } + lastReactorPair.conR.mtx.Unlock() + + if lastReactorPair.bcR.Switch.Peers().Size() == 0 { + break + } + } + + assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1) +} + +func TestBcBlockRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + request := bcBlockRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + nonResponseHeight int64 + expectErr bool + }{ + {"Valid Non-Response Message", 0, false}, + {"Valid Non-Response Message", 1, false}, + {"Invalid Non-Response Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight} + assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + request := bcStatusRequestMessage{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + responseHeight int64 + expectErr bool + }{ + {"Valid Response Message", 0, false}, + {"Valid Response Message", 1, false}, + {"Invalid Response Message", -1, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + response := bcStatusResponseMessage{Height: tc.responseHeight} + assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +//---------------------------------------------- +// utility funcs + +func makeTxs(height int64) (txs []types.Tx) { + for i := 0; i < 10; i++ { + txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) + } + return txs +} + +func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { + block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address) + return block +} + +type testApp struct { + abci.BaseApplication +} diff --git a/blockchain/v2/schedule.go b/blockchain/v2/schedule.go new file mode 100644 index 000000000..329557492 --- /dev/null +++ b/blockchain/v2/schedule.go @@ -0,0 +1,387 @@ +// nolint:unused +package v2 + +import ( + "fmt" + "math" + "math/rand" + "time" + + "github.com/tendermint/tendermint/p2p" +) + +type Event interface{} + +type blockState int + +const ( + blockStateUnknown blockState = iota + 
blockStateNew
+	blockStatePending
+	blockStateReceived
+	blockStateProcessed
+)
+
+func (e blockState) String() string {
+	switch e {
+	case blockStateUnknown:
+		return "Unknown"
+	case blockStateNew:
+		return "New"
+	case blockStatePending:
+		return "Pending"
+	case blockStateReceived:
+		return "Received"
+	case blockStateProcessed:
+		return "Processed"
+	default:
+		return fmt.Sprintf("unknown blockState: %d", e)
+	}
+}
+
+type peerState int
+
+const (
+	peerStateNew = iota
+	peerStateReady
+	peerStateRemoved
+)
+
+func (e peerState) String() string {
+	switch e {
+	case peerStateNew:
+		return "New"
+	case peerStateReady:
+		return "Ready"
+	case peerStateRemoved:
+		return "Removed"
+	default:
+		return fmt.Sprintf("unknown peerState: %d", e)
+	}
+}
+
+type scPeer struct {
+	peerID      p2p.ID
+	state       peerState
+	height      int64
+	lastTouched time.Time
+	lastRate    int64
+}
+
+func newScPeer(peerID p2p.ID) *scPeer {
+	return &scPeer{
+		peerID:      peerID,
+		state:       peerStateNew,
+		height:      -1,
+		lastTouched: time.Time{},
+	}
+}
+
+// The schedule is a composite data structure which allows a scheduler to keep
+// track of which blocks have been scheduled into which state.
+type schedule struct {
+	initHeight int64
+	// a map from block height to the blockState the block at that height is in
+	blockStates map[int64]blockState
+
+	// a map of peerID to the schedule-specific peer struct `scPeer`, used to
+	// keep track of peer-specific state
+	peers map[p2p.ID]*scPeer
+
+	// a map of heights to the peer we are waiting for a response from
+	pendingBlocks map[int64]p2p.ID
+
+	// the time at which a block was put in blockStatePending
+	pendingTime map[int64]time.Time
+
+	// the peerID of the peer which put the block in blockStateReceived
+	receivedBlocks map[int64]p2p.ID
+}
+
+func newSchedule(initHeight int64) *schedule {
+	sc := schedule{
+		initHeight:     initHeight,
+		blockStates:    make(map[int64]blockState),
+		peers:          make(map[p2p.ID]*scPeer),
+		pendingBlocks:  make(map[int64]p2p.ID),
+		pendingTime:    make(map[int64]time.Time),
+		receivedBlocks: make(map[int64]p2p.ID),
+	}
+
+	sc.setStateAtHeight(initHeight, blockStateNew)
+
+	return &sc
+}
+
+func (sc *schedule) addPeer(peerID p2p.ID) error {
+	if _, ok := sc.peers[peerID]; ok {
+		return fmt.Errorf("Cannot add duplicate peer %s", peerID)
+	}
+	sc.peers[peerID] = newScPeer(peerID)
+	return nil
+}
+
+func (sc *schedule) touchPeer(peerID p2p.ID, time time.Time) error {
+	peer, ok := sc.peers[peerID]
+	if !ok {
+		return fmt.Errorf("Couldn't find peer %s", peerID)
+	}
+
+	if peer.state == peerStateRemoved {
+		return fmt.Errorf("Tried to touch peer in peerStateRemoved")
+	}
+
+	peer.lastTouched = time
+
+	return nil
+}
+
+func (sc *schedule) removePeer(peerID p2p.ID) error {
+	peer, ok := sc.peers[peerID]
+	if !ok {
+		return fmt.Errorf("Couldn't find peer %s", peerID)
+	}
+
+	if peer.state == peerStateRemoved {
+		return fmt.Errorf("Tried to remove peer %s in peerStateRemoved", peerID)
+	}
+
+	for height, pendingPeerID := range sc.pendingBlocks {
+		if pendingPeerID == peerID {
+			sc.setStateAtHeight(height, blockStateNew)
+			delete(sc.pendingTime, height)
+			delete(sc.pendingBlocks, height)
+		}
+	}
+
+	for height, rcvPeerID := range sc.receivedBlocks {
+		if rcvPeerID == peerID {
+			sc.setStateAtHeight(height, blockStateNew)
+			delete(sc.receivedBlocks, height)
+		}
+	}
+
+	peer.state = peerStateRemoved
+
+	return nil
+}
+
+func (sc *schedule) setPeerHeight(peerID p2p.ID, height int64) error {
+	peer, ok := sc.peers[peerID]
+	if !ok {
+		return fmt.Errorf("Can't find peer %s", peerID)
+	}
+
+	if peer.state == peerStateRemoved {
+		return fmt.Errorf("Cannot set peer height for a peer in peerStateRemoved")
+	}
+
+	if height < peer.height {
+		return fmt.Errorf("Cannot move peer height lower (from %d to %d)", peer.height, height)
+	}
+
+	peer.height = height
+	peer.state = peerStateReady
+	for i := sc.minHeight(); i <= height; i++ {
+		if sc.getStateAtHeight(i) == blockStateUnknown {
+			sc.setStateAtHeight(i, blockStateNew)
+		}
+	}
+
+	return nil
+}
+
+func (sc *schedule) getStateAtHeight(height int64) blockState {
+	if height < sc.initHeight {
+		return blockStateProcessed
+	} else if state, ok := sc.blockStates[height]; ok {
+		return state
+	} else {
+		return blockStateUnknown
+	}
+}
+
+func (sc *schedule) getPeersAtHeight(height int64) []*scPeer {
+	peers := []*scPeer{}
+	for _, peer := range sc.peers {
+		if peer.height >= height {
+			peers = append(peers, peer)
+		}
+	}
+
+	return peers
+}
+
+func (sc *schedule) peersInactiveSince(duration time.Duration, now time.Time) []p2p.ID {
+	peers := []p2p.ID{}
+	for _, peer := range sc.peers {
+		if now.Sub(peer.lastTouched) > duration {
+			peers = append(peers, peer.peerID)
+		}
+	}
+
+	return peers
+}
+
+func (sc *schedule) peersSlowerThan(minSpeed int64) []p2p.ID {
+	peers := []p2p.ID{}
+	for _, peer := range sc.peers {
+		if peer.lastRate < minSpeed {
+			peers = append(peers, peer.peerID)
+		}
+	}
+
+	return peers
+}
+
+func (sc *schedule) setStateAtHeight(height int64, state blockState) {
+	sc.blockStates[height] = state
+}
+
+func (sc *schedule) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
+	peer, ok := sc.peers[peerID]
+	if !ok {
+		return fmt.Errorf("Can't find peer %s", peerID)
+	}
+
+	if peer.state == peerStateRemoved {
+		return fmt.Errorf("Cannot receive blocks from removed peer %s", peerID)
+	}
+
+	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
+		return fmt.Errorf("Received block %d from peer %s without being requested", height, peerID)
+	}
+
+	pendingTime, ok := sc.pendingTime[height]
+	if !ok || now.Sub(pendingTime) <= 0 {
+		return fmt.Errorf("Clock error. Block %d received at %s but requested at %s",
+			height, now, pendingTime)
+	}
+
+	elapsedSeconds := int64(now.Sub(pendingTime).Seconds())
+	if elapsedSeconds < 1 {
+		// guard against division by zero for responses that arrive in under a second
+		elapsedSeconds = 1
+	}
+	peer.lastRate = size / elapsedSeconds
+
+	sc.setStateAtHeight(height, blockStateReceived)
+	delete(sc.pendingBlocks, height)
+	delete(sc.pendingTime, height)
+
+	sc.receivedBlocks[height] = peerID
+
+	return nil
+}
+
+func (sc *schedule) markPending(peerID p2p.ID, height int64, time time.Time) error {
+	peer, ok := sc.peers[peerID]
+	if !ok {
+		return fmt.Errorf("Can't find peer %s", peerID)
+	}
+
+	state := sc.getStateAtHeight(height)
+	if state != blockStateNew {
+		return fmt.Errorf("Block %d should be in blockStateNew but was %s", height, state)
+	}
+
+	if peer.state != peerStateReady {
+		return fmt.Errorf("Cannot schedule %d from %s in %s", height, peerID, peer.state)
+	}
+
+	if height > peer.height {
+		return fmt.Errorf("Cannot request height %d from peer %s who is at height %d",
+			height, peerID, peer.height)
+	}
+
+	sc.setStateAtHeight(height, blockStatePending)
+	sc.pendingBlocks[height] = peerID
+	// XXX: to make this more accurate we can introduce a message from
+	// the IO routine which indicates the time the request was put on the wire
+	sc.pendingTime[height] = time
+
+	return nil
+}
+
+func (sc *schedule) markProcessed(height int64) error {
+	state := sc.getStateAtHeight(height)
+	if state != blockStateReceived {
+		return fmt.Errorf("Can't mark height %d processed from block state %s", height, state)
+	}
+
+	delete(sc.receivedBlocks, height)
+
+	sc.setStateAtHeight(height, blockStateProcessed)
+
+	return nil
+}
+
+// allBlocksProcessed returns true if all blocks are in blockStateProcessed,
+// i.e. the schedule has been completed
+func (sc *schedule) allBlocksProcessed() bool {
+	for _, state := range sc.blockStates {
+		if state != blockStateProcessed {
+			return false
+		}
+	}
+	return true
+}
+
+// highest block | state == blockStateNew
+func (sc *schedule) maxHeight() int64 {
+	var max int64 = 0
+	for height, state := range sc.blockStates {
+		if state == blockStateNew && height > max {
+			max = height
+		}
+	}
+
+	return max
+}
+
+// lowest block | state == blockStateNew
+func (sc *schedule) minHeight() int64 {
+	var min int64 = math.MaxInt64
+	for height, state := range sc.blockStates {
+		if state == blockStateNew && height < min {
+			min = height
+		}
+	}
+
+	return min
+}
+
+func (sc *schedule) pendingFrom(peerID p2p.ID) []int64 {
+	heights := []int64{}
+	for height, pendingPeerID := range sc.pendingBlocks {
+		if pendingPeerID == peerID {
+			heights = append(heights, height)
+		}
+	}
+	return heights
+}
+
+func (sc *schedule) selectPeer(peers []*scPeer) *scPeer {
+	// FIXME: properPeerSelector
+	s := rand.NewSource(time.Now().Unix())
+	r := rand.New(s)
+
+	return peers[r.Intn(len(peers))]
+}
+
+// XXX: this duplicates the logic of peersInactiveSince and peersSlowerThan
+func (sc *schedule) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
+	prunable := []p2p.ID{}
+	for peerID, peer := range sc.peers {
+		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
+			prunable = append(prunable, peerID)
+		}
+	}
+
+	return prunable
+}
+
+func (sc *schedule) numBlockInState(targetState blockState) uint32 {
+	var num uint32 = 0
+	for _, state := range sc.blockStates {
+		if state == targetState {
+			num++
+		}
+	}
+	return num
+}
diff --git a/blockchain/v2/schedule_test.go b/blockchain/v2/schedule_test.go
new file mode 100644
index 000000000..a1448c528
--- /dev/null
+++ b/blockchain/v2/schedule_test.go
@@ -0,0 +1,272 @@
+package v2
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/tendermint/tendermint/p2p"
+)
+
+func TestScheduleInit(t *testing.T) {
+	var (
+		initHeight int64 = 5
+		sc               = newSchedule(initHeight)
+	)
+
+	assert.Equal(t, blockStateNew, sc.getStateAtHeight(initHeight))
+	assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1))
+	assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1))
+}
+
+func TestAddPeer(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerIDTwo  p2p.ID = "2"
+		sc                = newSchedule(initHeight)
+	)
+
+	assert.Nil(t, sc.addPeer(peerID))
+	assert.Nil(t, sc.addPeer(peerIDTwo))
+	assert.Error(t, sc.addPeer(peerID))
+}
+
+func TestTouchPeer(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		sc                = newSchedule(initHeight)
+		now               = time.Now()
+	)
+
+	assert.Error(t, sc.touchPeer(peerID, now),
+		"Touching an unknown peer should return an error")
+
+	assert.Nil(t, sc.addPeer(peerID),
+		"Adding a peer should return no error")
+	assert.Nil(t, sc.touchPeer(peerID, now),
+		"Touching a peer should return no error")
+
+	threshold := 10 * time.Second
+	assert.Empty(t, sc.peersInactiveSince(threshold, now.Add(9*time.Second)),
+		"Expected no peers to be inactive after only 9 seconds")
+	assert.Containsf(t, sc.peersInactiveSince(threshold, now.Add(11*time.Second)), peerID,
+		"Expected peer %s to be inactive after 11 seconds", peerID)
+}
+
+func TestPeerHeight(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerHeight int64  = 20
+		sc                = newSchedule(initHeight)
+	)
+
+	assert.NoError(t, sc.addPeer(peerID),
+		"Adding a peer should return no error")
+	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight))
+	for i := initHeight; i <= peerHeight; i++ {
+		assert.Equal(t, sc.getStateAtHeight(i), blockStateNew,
+			"Expected all blocks to be in blockStateNew")
+		peerIDs := []p2p.ID{}
+		for _, peer := range sc.getPeersAtHeight(i) {
+			peerIDs = append(peerIDs, peer.peerID)
+		}
+
+		assert.Containsf(t, peerIDs, peerID,
+			"Expected %s to have block %d", peerID, i)
+	}
+}
+
+func TestTransitionPending(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerIDTwo  p2p.ID = "2"
+		peerHeight int64  = 20
+		sc                = newSchedule(initHeight)
+		now               = time.Now()
+	)
+
+	assert.NoError(t, sc.addPeer(peerID),
+		"Adding a peer should return no error")
+	assert.Nil(t, sc.addPeer(peerIDTwo),
+		"Adding a peer should return no error")
+
+	assert.Error(t, sc.markPending(peerID, peerHeight, now),
+		"Expected scheduling a block from a peer in peerStateNew to fail")
+
+	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
+		"Expected setPeerHeight to return no error")
+	assert.NoError(t, sc.setPeerHeight(peerIDTwo, peerHeight),
+		"Expected setPeerHeight to return no error")
+
+	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
+		"Expected markPending on a new block to succeed")
+	assert.Error(t, sc.markPending(peerIDTwo, peerHeight, now),
+		"Expected markPending by a second peer to fail")
+
+	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
+		"Expected the block to be in blockStatePending")
+
+	assert.NoError(t, sc.removePeer(peerID),
+		"Expected removePeer to return no error")
+
+	assert.Equal(t, blockStateNew, sc.getStateAtHeight(peerHeight),
+		"Expected the block to be in blockStateNew")
+
+	assert.Error(t, sc.markPending(peerID, peerHeight, now),
+		"Expected markPending from a removed peer to fail")
+
+	assert.NoError(t, sc.markPending(peerIDTwo, peerHeight, now),
+		"Expected markPending from a ready peer to succeed")
+
+	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
+		"Expected the block to be in blockStatePending")
+}
+
+func TestTransitionReceived(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerIDTwo  p2p.ID = "2"
+		peerHeight int64  = 20
+		blockSize  int64  = 1024
+		sc                = newSchedule(initHeight)
+		now               = time.Now()
+		receivedAt        = now.Add(1 * time.Second)
+	)
+
+	assert.NoError(t, sc.addPeer(peerID),
+		"Expected adding peer %s to succeed", peerID)
+	assert.NoError(t, sc.addPeer(peerIDTwo),
+		"Expected adding peer %s to succeed", peerIDTwo)
+	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
+		"Expected setPeerHeight to return no error")
+	assert.NoErrorf(t, sc.setPeerHeight(peerIDTwo, peerHeight),
+		"Expected setPeerHeight on %s to %d to succeed", peerIDTwo, peerHeight)
+	assert.NoError(t, sc.markPending(peerID, initHeight, now),
+		"Expected markPending on a new block to succeed")
+
+	assert.Error(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
+		"Expected markReceived from a non-requesting peer to fail")
+
+	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
+		"Expected markReceived on a pending block to succeed")
+
+	assert.Error(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
+		"Expected markReceived on an already received block to fail")
+
+	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
+		"Expected block %d to be blockStateReceived", initHeight)
+
+	assert.NoErrorf(t, sc.removePeer(peerID),
+		"Expected removePeer removing %s to succeed", peerID)
+
+	assert.Equalf(t, blockStateNew, sc.getStateAtHeight(initHeight),
+		"Expected block %d to be blockStateNew", initHeight)
+
+	assert.NoErrorf(t, sc.markPending(peerIDTwo, initHeight, now),
+		"Expected markPending %d from %s to succeed", initHeight, peerIDTwo)
+	assert.NoErrorf(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
+		"Expected markReceived %d from %s to succeed", initHeight, peerIDTwo)
+	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
+		"Expected block %d to be blockStateReceived", initHeight)
+}
+
+func TestTransitionProcessed(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerHeight int64  = 20
+		blockSize  int64  = 1024
+		sc                = newSchedule(initHeight)
+		now               = time.Now()
+		receivedAt        = now.Add(1 * time.Second)
+	)
+
+	assert.NoError(t, sc.addPeer(peerID),
+		"Expected adding peer %s to succeed", peerID)
+	assert.NoErrorf(t, sc.setPeerHeight(peerID, peerHeight),
+		"Expected setPeerHeight on %s to %d to succeed", peerID, peerHeight)
+	assert.NoError(t, sc.markPending(peerID, initHeight, now),
+		"Expected markPending on a new block to succeed")
+	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
+		"Expected markReceived on a pending block to succeed")
+
+	assert.Error(t, sc.markProcessed(initHeight+1),
+		"Expected marking %d as processed to fail", initHeight+1)
+	assert.NoError(t, sc.markProcessed(initHeight),
+		"Expected marking %d as processed to succeed", initHeight)
+
+	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
+		"Expected block %d to be blockStateProcessed", initHeight)
+
+	assert.NoError(t, sc.removePeer(peerID),
+		"Expected removing peer %s to succeed", peerID)
+
+	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
+		"Expected block %d to be blockStateProcessed", initHeight)
+}
+
+func TestMinMaxHeight(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerHeight int64  = 20
+		sc                = newSchedule(initHeight)
+		now               = time.Now()
+	)
+
+	assert.Equal(t, initHeight, sc.minHeight(),
+		"Expected min height to be the initialized height")
+
+	assert.Equal(t, initHeight, sc.maxHeight(),
+		"Expected max height to be the initialized height")
+
+	assert.NoError(t, sc.addPeer(peerID),
+		"Adding a peer should return no error")
+
+	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
+		"Expected setPeerHeight to return no error")
+
+	assert.Equal(t, peerHeight, sc.maxHeight(),
+		"Expected max height to increase to peerHeight")
+
+	assert.Nil(t, sc.markPending(peerID, initHeight, now.Add(1*time.Second)),
+		"Expected marking initHeight as pending to return no error")
+
+	assert.Equal(t, initHeight+1, sc.minHeight(),
+		"Expected marking initHeight as pending to move minHeight forward")
+}
+
+func TestPeersSlowerThan(t *testing.T) {
+	var (
+		initHeight int64  = 5
+		peerID     p2p.ID = "1"
+		peerHeight int64  = 20
+		blockSize  int64  = 1024
+		sc                = newSchedule(initHeight)
+		now               = time.Now()
+		receivedAt        = now.Add(1 * time.Second)
+	)
+
+	assert.NoError(t, sc.addPeer(peerID),
+		"Adding a peer should return no error")
+
+	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
+		"Expected setPeerHeight to return no error")
+
+	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
+		"Expected markPending to return no error")
+
+	assert.NoError(t, sc.markReceived(peerID, peerHeight, blockSize, receivedAt),
+		"Expected markReceived to return no error")
+
+	assert.Empty(t, sc.peersSlowerThan(blockSize-1),
+		"expected no peers to be slower than blockSize-1 bytes/sec")
+
+	assert.Containsf(t, sc.peersSlowerThan(blockSize+1), peerID,
+		"expected %s to be slower than blockSize+1 bytes/sec", peerID)
+}
diff --git a/cmd/contract_tests/main.go b/cmd/contract_tests/main.go
new file mode 100644
index 000000000..487537824
--- /dev/null
+++ b/cmd/contract_tests/main.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/snikch/goodman/hooks"
+	"github.com/snikch/goodman/transaction"
+)
+
+func main() {
+	// This must be compiled beforehand and given to dredd as a parameter; in the meantime the server should be running
+	h := hooks.NewHooks()
+	server := hooks.NewServer(hooks.NewHooksRunner(h))
+	h.BeforeAll(func(t []*transaction.Transaction) {
+		fmt.Println(t[0].Name)
+	})
+	h.BeforeEach(func(t *transaction.Transaction) {
+		if strings.HasPrefix(t.Name, "Tx") ||
+			// We need a proper example of evidence to broadcast
+			strings.HasPrefix(t.Name, "Info > /broadcast_evidence") ||
+			// We need a proper example of path and data
+			strings.HasPrefix(t.Name, "ABCI > /abci_query") ||
+			// We need to find a way to make a transaction before starting the tests;
+			// its hash should replace the dummy one in the swagger file
+			strings.HasPrefix(t.Name, "Info > /tx") {
+			t.Skip = true
+			fmt.Printf("%s has been skipped\n", t.Name)
+		}
+	})
+	server.Serve()
+	defer server.Listener.Close()
+	fmt.Print("FINE")
+}
diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go
index c86bced81..22af6418f 100644
--- a/cmd/priv_val_server/main.go
+++ b/cmd/priv_val_server/main.go
@@ -48,15 +48,17 @@ func main() {
 		os.Exit(1)
 	}
 
-	rs := privval.NewSignerServiceEndpoint(logger, *chainID, pv, dialer)
-	err := rs.Start()
+	sd := privval.NewSignerDialerEndpoint(logger, dialer)
+	ss := privval.NewSignerServer(sd, *chainID, pv)
+
+	err := ss.Start()
 	if err != nil {
 		panic(err)
 	}
 
 	// Stop upon receiving SIGTERM or CTRL-C.
 	cmn.TrapSignal(logger, func() {
-		err := rs.Stop()
+		err := ss.Stop()
 		if err != nil {
 			panic(err)
 		}
diff --git a/cmd/tendermint/commands/wire.go b/cmd/tendermint/commands/codec.go
similarity index 100%
rename from cmd/tendermint/commands/wire.go
rename to cmd/tendermint/commands/codec.go
diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go
index 055a76c51..4e8bde8b4 100644
--- a/cmd/tendermint/commands/reset_priv_validator.go
+++ b/cmd/tendermint/commands/reset_priv_validator.go
@@ -18,6 +18,12 @@ var ResetAllCmd = &cobra.Command{
 	Run: resetAll,
 }
 
+var keepAddrBook bool
+
+func init() {
+	ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact")
+}
+
 // ResetPrivValidatorCmd resets the private validator files.
 var ResetPrivValidatorCmd = &cobra.Command{
 	Use: "unsafe_reset_priv_validator",
@@ -41,7 +47,11 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
 
 // ResetAll removes address book files plus all data, and resets the privValdiator data.
 // Exported so other CLI tools can use it.
 func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
-	removeAddrBook(addrBookFile, logger)
+	if keepAddrBook {
+		logger.Info("The address book remains intact")
+	} else {
+		removeAddrBook(addrBookFile, logger)
+	}
 	if err := os.RemoveAll(dbDir); err == nil {
 		logger.Info("Removed all blockchain history", "dir", dbDir)
 	} else {
diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go
index 892a49b74..229385af9 100644
--- a/cmd/tendermint/commands/root_test.go
+++ b/cmd/tendermint/commands/root_test.go
@@ -165,7 +165,7 @@ func TestRootConfig(t *testing.T) {
 func WriteConfigVals(dir string, vals map[string]string) error {
 	data := ""
 	for k, v := range vals {
-		data = data + fmt.Sprintf("%s = \"%s\"\n", k, v)
+		data += fmt.Sprintf("%s = \"%s\"\n", k, v)
 	}
 	cfile := filepath.Join(dir, "config.toml")
 	return ioutil.WriteFile(cfile, []byte(data), 0666)
diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go
index fa63b4944..70de9aba7 100644
--- a/cmd/tendermint/commands/run_node.go
+++ b/cmd/tendermint/commands/run_node.go
@@ -19,7 +19,7 @@ func AddNodeFlags(cmd *cobra.Command) {
 	cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process")
 
 	// node flags
-	cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
+	cmd.Flags().Bool("fast_sync", config.FastSyncMode, "Fast blockchain syncing")
 
 	// abci flags
 	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or one of: 'kvstore', 'persistent_kvstore', 'counter', 'counter_serial' or 'noop' for local testing.")
diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go
index e34b8d305..5e2dc1a3a 100644
--- a/cmd/tendermint/commands/testnet.go
+++ b/cmd/tendermint/commands/testnet.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 
 	cfg "github.com/tendermint/tendermint/config"
 	cmn "github.com/tendermint/tendermint/libs/common"
@@ -20,13 +21,17 @@ import (
 var (
 	nValidators    int
 	nNonValidators int
+	configFile     string
 	outputDir      string
 	nodeDirPrefix  string
 
 	populatePersistentPeers bool
 	hostnamePrefix          string
+	hostnameSuffix          string
 	startingIPAddress       string
+	hostnames               []string
 	p2pPort                 int
+	randomMonikers          bool
 )
 
 const (
@@ -36,6 +41,8 @@ const (
 func init() {
 	TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
 		"Number of validators to initialize the testnet with")
+	TestnetFilesCmd.Flags().StringVar(&configFile, "config", "",
+		"Config file to use (note some options may be overwritten)")
 	TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
 		"Number of non-validators to initialize the testnet with")
 	TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
@@ -46,11 +53,17 @@ func init() {
 	TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
 		"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
 	TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
-		"Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
+		"Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
+	TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "",
+		"Hostname suffix (\".xyz.com\" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
 	TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
-		"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
+		"Starting IP address (\"192.168.0.1\" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
+	TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{},
+		"Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)")
 	TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656,
 		"P2P Port")
+	TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false,
+		"Randomize the moniker for each generated node")
 }
 
 // TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
@@ -72,7 +85,29 @@ Example:
 }
 
 func testnetFiles(cmd *cobra.Command, args []string) error {
+	if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) {
+		return fmt.Errorf(
+			"testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used",
+			nValidators+nNonValidators,
+		)
+	}
+
 	config := cfg.DefaultConfig()
+
+	// overwrite default config if set and valid
+	if configFile != "" {
+		viper.SetConfigFile(configFile)
+		if err := viper.ReadInConfig(); err != nil {
+			return err
+		}
+		if err := viper.Unmarshal(config); err != nil {
+			return err
+		}
+		if err := config.ValidateBasic(); err != nil {
+			return err
+		}
+	}
+
 	genVals := make([]types.GenesisValidator, nValidators)
 
 	for i := 0; i < nValidators; i++ {
@@ -126,9 +161,10 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 
 	// Generate genesis doc from generated validators
 	genDoc := &types.GenesisDoc{
-		GenesisTime: tmtime.Now(),
-		ChainID:     "chain-" + cmn.RandStr(6),
-		Validators:  genVals,
+		ChainID:         "chain-" + cmn.RandStr(6),
+		ConsensusParams: types.DefaultConsensusParams(),
+		GenesisTime:     tmtime.Now(),
+		Validators:      genVals,
 	}
 
 	// Write genesis file.
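+	// (ConsensusParams is now set explicitly in genDoc above, so every node
+	// in the generated testnet starts from the same default parameters.)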
@@ -162,6 +198,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error { if populatePersistentPeers { config.P2P.PersistentPeers = persistentPeers } + config.Moniker = moniker(i) cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config) } @@ -171,21 +208,23 @@ func testnetFiles(cmd *cobra.Command, args []string) error { } func hostnameOrIP(i int) string { - if startingIPAddress != "" { - ip := net.ParseIP(startingIPAddress) - ip = ip.To4() - if ip == nil { - fmt.Printf("%v: non ipv4 address\n", startingIPAddress) - os.Exit(1) - } - - for j := 0; j < i; j++ { - ip[3]++ - } - return ip.String() + if len(hostnames) > 0 && i < len(hostnames) { + return hostnames[i] + } + if startingIPAddress == "" { + return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + } + ip := net.ParseIP(startingIPAddress) + ip = ip.To4() + if ip == nil { + fmt.Printf("%v: non ipv4 address\n", startingIPAddress) + os.Exit(1) } - return fmt.Sprintf("%s%d", hostnamePrefix, i) + for j := 0; j < i; j++ { + ip[3]++ + } + return ip.String() } func persistentPeersString(config *cfg.Config) (string, error) { @@ -201,3 +240,20 @@ func persistentPeersString(config *cfg.Config) (string, error) { } return strings.Join(persistentPeers, ","), nil } + +func moniker(i int) string { + if randomMonikers { + return randomMoniker() + } + if len(hostnames) > 0 && i < len(hostnames) { + return hostnames[i] + } + if startingIPAddress == "" { + return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + } + return randomMoniker() +} + +func randomMoniker() string { + return cmn.HexBytes(cmn.RandBytes(8)).String() +} diff --git a/config/config.go b/config/config.go index 6de68ad22..5592227f1 100644 --- a/config/config.go +++ b/config/config.go @@ -67,6 +67,7 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` DBCache *DBCacheConfig `mapstructure:"dbcache"` Mempool *MempoolConfig `mapstructure:"mempool"` + FastSync *FastSyncConfig `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx_index"` BlockIndex *BlockIndexConfig `mapstructure:"block_index"` @@ -81,6 +82,7 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), DBCache: DefaultDBCacheConfig(), Mempool: DefaultMempoolConfig(), + FastSync: DefaultFastSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), BlockIndex: DefaultBlockIndexConfig(), @@ -96,6 +98,7 @@ func TestConfig() *Config { P2P: TestP2PConfig(), DBCache: TestDBCacheConfig(), Mempool: TestMempoolConfig(), + FastSync: TestFastSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), BlockIndex: TestBlockIndexConfig(), @@ -131,6 +134,9 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.Mempool.ValidateBasic(); err != nil { return errors.Wrap(err, "Error in [mempool] section") } + if err := cfg.FastSync.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [fastsync] section") + } if err := cfg.Consensus.ValidateBasic(); err != nil { return errors.Wrap(err, "Error in [consensus] section") } @@ -162,7 +168,7 @@ type BaseConfig struct { // If this node is many blocks behind the tip of the chain, FastSync // allows them to catchup quickly by downloading blocks in parallel // and verifying their commits - FastSync bool `mapstructure:"fast_sync"` + FastSyncMode bool `mapstructure:"fast_sync"` // it is for fullnode/witness who do not need consensus to sync block. 
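For orientation, a minimal standalone sketch of the hostname resolution order that the new hostnameOrIP/moniker pair follows: explicit --hostname entries win, then --starting-ip-address if set, otherwise --hostname-prefix/--hostname-suffix. The helper below is hypothetical and collapses the IP-incrementing branch of the real code.

package main

import "fmt"

// hostname mirrors the resolution order introduced in testnet.go above.
func hostname(i int, hostnames []string, startingIP, prefix, suffix string) string {
	if i < len(hostnames) {
		return hostnames[i] // explicit --hostname overrides
	}
	if startingIP == "" {
		return fmt.Sprintf("%s%d%s", prefix, i, suffix) // prefix/suffix naming
	}
	return startingIP // the real code increments the last IPv4 octet per node
}

func main() {
	fmt.Println(hostname(1, nil, "", "node", ".xyz.com")) // node1.xyz.com
}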
diff --git a/config/config.go b/config/config.go
index 6de68ad22..5592227f1 100644
--- a/config/config.go
+++ b/config/config.go
@@ -67,6 +67,7 @@ type Config struct {
 	P2P        *P2PConfig        `mapstructure:"p2p"`
 	DBCache    *DBCacheConfig    `mapstructure:"dbcache"`
 	Mempool    *MempoolConfig    `mapstructure:"mempool"`
+	FastSync   *FastSyncConfig   `mapstructure:"fastsync"`
 	Consensus  *ConsensusConfig  `mapstructure:"consensus"`
 	TxIndex    *TxIndexConfig    `mapstructure:"tx_index"`
 	BlockIndex *BlockIndexConfig `mapstructure:"block_index"`
@@ -81,6 +82,7 @@ func DefaultConfig() *Config {
 		P2P:        DefaultP2PConfig(),
 		DBCache:    DefaultDBCacheConfig(),
 		Mempool:    DefaultMempoolConfig(),
+		FastSync:   DefaultFastSyncConfig(),
 		Consensus:  DefaultConsensusConfig(),
 		TxIndex:    DefaultTxIndexConfig(),
 		BlockIndex: DefaultBlockIndexConfig(),
@@ -96,6 +98,7 @@ func TestConfig() *Config {
 		P2P:        TestP2PConfig(),
 		DBCache:    TestDBCacheConfig(),
 		Mempool:    TestMempoolConfig(),
+		FastSync:   TestFastSyncConfig(),
 		Consensus:  TestConsensusConfig(),
 		TxIndex:    TestTxIndexConfig(),
 		BlockIndex: TestBlockIndexConfig(),
@@ -131,6 +134,9 @@ func (cfg *Config) ValidateBasic() error {
 	if err := cfg.Mempool.ValidateBasic(); err != nil {
 		return errors.Wrap(err, "Error in [mempool] section")
 	}
+	if err := cfg.FastSync.ValidateBasic(); err != nil {
+		return errors.Wrap(err, "Error in [fastsync] section")
+	}
 	if err := cfg.Consensus.ValidateBasic(); err != nil {
 		return errors.Wrap(err, "Error in [consensus] section")
 	}
@@ -162,7 +168,7 @@ type BaseConfig struct {
 	// If this node is many blocks behind the tip of the chain, FastSync
 	// allows them to catchup quickly by downloading blocks in parallel
 	// and verifying their commits
-	FastSync bool `mapstructure:"fast_sync"`
+	FastSyncMode bool `mapstructure:"fast_sync"`

 	// it is for fullnode/witness who do not need consensus to sync block.
 	HotSyncReactor bool `mapstructure:"hot_sync_reactor"`
@@ -187,7 +193,18 @@ type BaseConfig struct {
 	// >0 - sync from that height
 	StateSyncHeight int64 `mapstructure:"state_sync_height"`

-	// Database backend: leveldb | memdb | cleveldb
+	// Database backend: goleveldb | cleveldb | boltdb
+	// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+	//   - pure go
+	//   - stable
+	// * cleveldb (uses levigo wrapper)
+	//   - fast
+	//   - requires gcc
+	//   - use cleveldb build tag (go build -tags cleveldb)
+	// * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+	//   - EXPERIMENTAL
+	//   - may be faster in some use-cases (random reads - indexer)
+	//   - use boltdb build tag (go build -tags boltdb)
 	DBBackend string `mapstructure:"db_backend"`

 	// Database directory
@@ -242,14 +259,14 @@ func DefaultBaseConfig() BaseConfig {
 		LogLevel:          DefaultPackageLogLevels(),
 		LogFormat:         LogFormatPlain,
 		ProfListenAddress: "",
-		FastSync:          true,
+		FastSyncMode:      true,
 		StateSyncReactor:  true,
 		HotSync:           false,
 		HotSyncReactor:    false,
 		HotSyncTimeout:    3 * time.Second,
 		StateSyncHeight:   -1,
 		FilterPeers:       false,
-		DBBackend:         "leveldb",
+		DBBackend:         "goleveldb",
 		DBPath:            "data",
 		WithAppStat:       true,
 	}
@@ -260,7 +277,7 @@ func TestBaseConfig() BaseConfig {
 	cfg := DefaultBaseConfig()
 	cfg.chainID = "tendermint_test"
 	cfg.ProxyApp = "kvstore"
-	cfg.FastSync = false
+	cfg.FastSyncMode = false
 	cfg.DBBackend = "memdb"
 	return cfg
 }
@@ -405,7 +422,14 @@ type RPCConfig struct {
 	// See https://github.com/tendermint/tendermint/issues/3435
 	TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`

-	// The name of a file containing certificate that is used to create the HTTPS server.
+	// Maximum size of request body, in bytes
+	MaxBodyBytes int64 `mapstructure:"max_body_bytes"`
+
+	// Maximum size of request header, in bytes
+	MaxHeaderBytes int `mapstructure:"max_header_bytes"`
+
+	// The path to a file containing certificate that is used to create the HTTPS server.
+	// Might be either an absolute path or a path relative to tendermint's config directory.
 	//
 	// If the certificate is signed by a certificate authority,
 	// the certFile should be the concatenation of the server's certificate, any intermediates,
@@ -414,7 +438,8 @@ type RPCConfig struct {
 	// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
 	TLSCertFile string `mapstructure:"tls_cert_file"`

-	// The name of a file containing matching private key that is used to create the HTTPS server.
+	// The path to a file containing matching private key that is used to create the HTTPS server.
+	// Might be either an absolute path or a path relative to tendermint's config directory.
 	//
 	// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
 	TLSKeyFile string `mapstructure:"tls_key_file"`
@@ -441,6 +466,9 @@ func DefaultRPCConfig() *RPCConfig {
 		MaxSubscriptionsPerClient: 5,
 		TimeoutBroadcastTxCommit:  10 * time.Second,

+		MaxBodyBytes:   int64(1000000), // 1MB
+		MaxHeaderBytes: 1 << 20,        // same as the net/http default
+
 		TLSCertFile: "",
 		TLSKeyFile:  "",
 	}
@@ -485,6 +513,12 @@ func (cfg *RPCConfig) ValidateBasic() error {
 	if cfg.TimeoutBroadcastTxCommit < 0 {
 		return errors.New("timeout_broadcast_tx_commit can't be negative")
 	}
+	if cfg.MaxBodyBytes < 0 {
+		return errors.New("max_body_bytes can't be negative")
+	}
+	if cfg.MaxHeaderBytes < 0 {
+		return errors.New("max_header_bytes can't be negative")
+	}
 	return nil
 }

@@ -494,11 +528,19 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
 }

 func (cfg RPCConfig) KeyFile() string {
-	return rootify(filepath.Join(defaultConfigDir, cfg.TLSKeyFile), cfg.RootDir)
+	path := cfg.TLSKeyFile
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
 }

 func (cfg RPCConfig) CertFile() string {
-	return rootify(filepath.Join(defaultConfigDir, cfg.TLSCertFile), cfg.RootDir)
+	path := cfg.TLSCertFile
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
 }

 func (cfg RPCConfig) IsTLSEnabled() bool {
@@ -757,6 +799,7 @@ type MempoolConfig struct {
 	CacheSize            int  `mapstructure:"cache_size"`
 	OnlyToPersistent     bool `mapstructure:"only_to_persistent"`
 	SkipTxFromPersistent bool `mapstructure:"skip_tx_from_persistent"`
+	MaxTxBytes           int  `mapstructure:"max_tx_bytes"`
 }

 // DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@@ -767,9 +810,10 @@ func DefaultMempoolConfig() *MempoolConfig {
 		WalPath: "",
 		// Each signature verification takes .5ms, Size reduced until we implement
 		// ABCI Recheck
-		Size:        5000,
-		MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
-		CacheSize:   10000,
+		Size:                 5000,
+		MaxTxsBytes:          1024 * 1024 * 1024, // 1GB
+		CacheSize:            10000,
+		MaxTxBytes:           1024 * 1024, // 1MB
 		OnlyToPersistent:     false,
 		SkipTxFromPersistent: false,
 	}
@@ -804,9 +848,44 @@ func (cfg *MempoolConfig) ValidateBasic() error {
 	if cfg.CacheSize < 0 {
 		return errors.New("cache_size can't be negative")
 	}
+	if cfg.MaxTxBytes < 0 {
+		return errors.New("max_tx_bytes can't be negative")
+	}
 	return nil
 }

+//-----------------------------------------------------------------------------
+// FastSyncConfig
+
+// FastSyncConfig defines the configuration for the Tendermint fast sync service
+type FastSyncConfig struct {
+	Version string `mapstructure:"version"`
+}
+
+// DefaultFastSyncConfig returns a default configuration for the fast sync service
+func DefaultFastSyncConfig() *FastSyncConfig {
+	return &FastSyncConfig{
+		Version: "v0",
+	}
+}
+
+// TestFastSyncConfig returns a default configuration for the fast sync.
+func TestFastSyncConfig() *FastSyncConfig {
+	return DefaultFastSyncConfig()
+}
+
+// ValidateBasic performs basic validation.
+func (cfg *FastSyncConfig) ValidateBasic() error {
+	switch cfg.Version {
+	case "v0":
+		return nil
+	case "v1":
+		return nil
+	default:
+		return fmt.Errorf("unknown fastsync version %s", cfg.Version)
+	}
+}
+
 //-----------------------------------------------------------------------------
 // ConsensusConfig
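A minimal sketch (not part of the patch) of how the new knobs compose, assuming the config package import path used throughout this diff:

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()

	// Select the fast sync implementation added above ("v0" is the default).
	c.FastSync.Version = "v1"

	// New RPC request limits; defaults are 1000000 and 1<<20, per DefaultRPCConfig.
	c.RPC.MaxBodyBytes = 2000000
	c.RPC.MaxHeaderBytes = 1 << 20

	// ValidateBasic now also checks the [fastsync] section.
	if err := c.ValidateBasic(); err != nil {
		fmt.Println("invalid config:", err)
	}
}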
"CreateEmptyBlocksInterval", + "PeerGossipSleepDuration", + "PeerQueryMaj23SleepDuration", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} + +func TestInstrumentationConfigValidateBasic(t *testing.T) { + cfg := TestInstrumentationConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with maximum open connections + cfg.MaxOpenConnections = -1 + assert.Error(t, cfg.ValidateBasic()) +} diff --git a/config/toml.go b/config/toml.go index 657b3e8fa..65db774dd 100644 --- a/config/toml.go +++ b/config/toml.go @@ -28,13 +28,13 @@ func init() { // and panics if it fails. func EnsureRoot(rootDir string) { if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil { - cmn.PanicSanity(err.Error()) + panic(err.Error()) } if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil { - cmn.PanicSanity(err.Error()) + panic(err.Error()) } if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil { - cmn.PanicSanity(err.Error()) + panic(err.Error()) } configFilePath := filepath.Join(rootDir, defaultConfigFilePath) @@ -79,7 +79,7 @@ moniker = "{{ .BaseConfig.Moniker }}" # If this node is many blocks behind the tip of the chain, FastSync # allows them to catchup quickly by downloading blocks in parallel # and verifying their commits -fast_sync = {{ .BaseConfig.FastSync }} +fast_sync = {{ .BaseConfig.FastSyncMode }} # Only take effect when HotSyncReactor is true. # If true, will sync blocks use hot sync protocol @@ -105,7 +105,18 @@ state_sync_reactor = {{ .BaseConfig.StateSyncReactor }} # >0 - sync from that height state_sync_height = {{ .BaseConfig.StateSyncHeight }} -# Database backend: leveldb | memdb | cleveldb +# Database backend: goleveldb | cleveldb | boltdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) db_backend = "{{ .BaseConfig.DBBackend }}" # Database directory @@ -227,14 +238,22 @@ max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} # See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" -# The name of a file containing certificate that is used to create the HTTPS server. +# Maximum size of request body, in bytes +max_body_bytes = {{ .RPC.MaxBodyBytes }} + +# Maximum size of request header, in bytes +max_header_bytes = {{ .RPC.MaxHeaderBytes }} + +# The path to a file containing certificate that is used to create the HTTPS server. +# Migth be either absolute path or path related to tendermint's config directory. # If the certificate is signed by a certificate authority, # the certFile should be the concatenation of the server's certificate, any intermediates, # and the CA's certificate. # NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. tls_cert_file = "{{ .RPC.TLSCertFile }}" -# The name of a file containing matching private key that is used to create the HTTPS server. 
+# The path to a file containing matching private key that is used to create the HTTPS server. +# Migth be either absolute path or path related to tendermint's config directory. # NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. tls_key_file = "{{ .RPC.TLSKeyFile }}" @@ -352,6 +371,18 @@ max_txs_bytes = {{ .Mempool.MaxTxsBytes }} # Size of the cache (used to filter transactions we saw earlier) in transactions cache_size = {{ .Mempool.CacheSize }} +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. +max_tx_bytes = {{ .Mempool.MaxTxBytes }} + +##### fast sync configuration options ##### +[fastsync] + +# Fast Sync version to use: +# 1) "v0" (default) - the legacy fast sync implementation +# 2) "v1" - refactor of v0 version for better testability +version = "{{ .FastSync.Version }}" + ##### consensus configuration options ##### [consensus] diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 51cfda5b1..99becebec 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -69,7 +70,7 @@ func TestByzantine(t *testing.T) { blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) require.NoError(t, err) - conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states + conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states conR.SetLogger(logger.With("validator", i)) conR.SetEventBus(eventBus) @@ -81,6 +82,7 @@ func TestByzantine(t *testing.T) { } reactors[i] = conRI + sm.SaveState(css[i].blockExec.DB(), css[i].state) //for save height 1's validators info } defer func() { @@ -175,7 +177,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons // Create a new proposal block from state/txs from the mempool. block1, blockParts1 := cs.createProposalBlock() - polRound, propBlockID := cs.ValidRound, types.BlockID{block1.Hash(), blockParts1.Header()} + polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartsHeader: blockParts1.Header()} proposal1 := types.NewProposal(height, round, polRound, propBlockID) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { t.Error(err) @@ -183,7 +185,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons // Create a new proposal block from state/txs from the mempool. 
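To see the new template sections end to end, a short sketch that materializes a config.toml via the helpers touched above; the root path is made up:

package main

import (
	"path/filepath"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	root := "/tmp/tm-example" // hypothetical root directory

	// EnsureRoot creates the config/ and data/ directories and, per the
	// hunk above, now panics (rather than cmn.PanicSanity) on failure.
	cfg.EnsureRoot(root)

	c := cfg.DefaultConfig()
	c.SetRoot(root)
	c.FastSync.Version = "v1" // rendered into the new [fastsync] section

	cfg.WriteConfigFile(filepath.Join(root, "config", "config.toml"), c)
}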
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go
index 51cfda5b1..99becebec 100644
--- a/consensus/byzantine_test.go
+++ b/consensus/byzantine_test.go
@@ -10,6 +10,7 @@ import (
 	"github.com/stretchr/testify/require"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/p2p"
+	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )

@@ -69,7 +70,7 @@ func TestByzantine(t *testing.T) {
 		blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
 		require.NoError(t, err)

-		conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
+		conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
 		conR.SetLogger(logger.With("validator", i))
 		conR.SetEventBus(eventBus)

@@ -81,6 +82,7 @@ func TestByzantine(t *testing.T) {
 		}

 		reactors[i] = conRI
+		sm.SaveState(css[i].blockExec.DB(), css[i].state) // save height 1's validators info
 	}

 	defer func() {
@@ -175,7 +177,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
 	// Create a new proposal block from state/txs from the mempool.
 	block1, blockParts1 := cs.createProposalBlock()
-	polRound, propBlockID := cs.ValidRound, types.BlockID{block1.Hash(), blockParts1.Header()}
+	polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartsHeader: blockParts1.Header()}
 	proposal1 := types.NewProposal(height, round, polRound, propBlockID)
 	if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil {
 		t.Error(err)
@@ -183,7 +185,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
 	// Create a new proposal block from state/txs from the mempool.
 	block2, blockParts2 := cs.createProposalBlock()
-	polRound, propBlockID = cs.ValidRound, types.BlockID{block2.Hash(), blockParts2.Header()}
+	polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartsHeader: blockParts2.Header()}
 	proposal2 := types.NewProposal(height, round, polRound, propBlockID)
 	if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil {
 		t.Error(err)
@@ -269,4 +271,4 @@ func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
 	br.reactor.Receive(chID, peer, msgBytes)
 }

-func (br *ByzantineReactor) InitAddPeer(peer p2p.Peer) p2p.Peer { return peer }
+func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
diff --git a/consensus/wire.go b/consensus/codec.go
similarity index 100%
rename from consensus/wire.go
rename to consensus/codec.go
diff --git a/consensus/common_test.go b/consensus/common_test.go
index 94df2b62c..d1d692b17 100644
--- a/consensus/common_test.go
+++ b/consensus/common_test.go
@@ -14,11 +14,12 @@ import (

 	"github.com/go-kit/kit/log/term"

+	"path"
+
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/counter"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
-	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	cstypes "github.com/tendermint/tendermint/consensus/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
@@ -29,6 +30,7 @@ import (
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
 	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
 )
@@ -84,7 +86,7 @@ func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, hea
 		Round:     vs.Round,
 		Timestamp: tmtime.Now(),
 		Type:      voteType,
-		BlockID:   types.BlockID{hash, header},
+		BlockID:   types.BlockID{Hash: hash, PartsHeader: header},
 	}
 	err := vs.PrivValidator.SignVote(config.ChainID(), vote)
 	return vote, err
@@ -119,6 +121,24 @@ func incrementRound(vss ...*validatorStub) {
 	}
 }

+type ValidatorStubsByAddress []*validatorStub
+
+func (vss ValidatorStubsByAddress) Len() int {
+	return len(vss)
+}
+
+func (vss ValidatorStubsByAddress) Less(i, j int) bool {
+	return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1
+}
+
+func (vss ValidatorStubsByAddress) Swap(i, j int) {
+	it := vss[i]
+	vss[i] = vss[j]
+	vss[i].Index = i
+	vss[j] = it
+	vss[j].Index = j
+}
+
 //-------------------------------------------------------------------------------
 // Functions for transitioning the consensus state

@@ -139,7 +159,7 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round
 	}

 	// Make proposal
-	polRound, propBlockID := validRound, types.BlockID{block.Hash(), blockParts.Header()}
+	polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
 	proposal = types.NewProposal(height, round, polRound, propBlockID)
 	if err := vs.SignProposal(chainID, proposal); err != nil {
 		panic(err)
@@ -228,7 +248,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
 }

 func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
-	votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote)
+	votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
 	if err != nil {
 		panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
 	}
@@ -260,7 +280,7 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv type

 func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState {
 	// Get BlockStore
-	blockStore := bc.NewBlockStore(blockDB)
+	blockStore := store.NewBlockStore(blockDB)

 	// one for mempool, one for consensus
 	mtx := new(sync.Mutex)
@@ -268,7 +288,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
 	proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

 	// Make Mempool
-	mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
+	mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
 	mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
 	if thisConfig.Consensus.WaitForTxs() {
 		mempool.EnableTxsAvailable()
@@ -278,7 +298,8 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
 	evpool := sm.MockEvidencePool{}

 	// Make ConsensusState
-	stateDB := dbm.NewMemDB()
+	stateDB := blockDB
+	sm.SaveState(stateDB, state) // save height 1's validators info
 	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool, true)
 	cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
 	cs.SetLogger(log.TestingLogger().With("module", "consensus"))
@@ -351,7 +372,7 @@ func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
 }

 func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
-	timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
+	timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
 	ensureNoNewEvent(
 		stepCh,
 		timeoutDuration,
@@ -398,7 +419,7 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) {
 }

 func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) {
-	timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
+	timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
 	ensureNewEvent(timeoutCh, height, round, timeoutDuration,
 		"Timeout expired while waiting for NewTimeout event")
 }
@@ -564,7 +585,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
 		app.InitChain(abci.RequestInitChain{Validators: vals})

-		css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
+		css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
@@ -576,12 +597,11 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 }

 // nPeers = nValidators + nNotValidator
-func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
-	appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
-
+func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
 	genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
 	css := make([]*ConsensusState, nPeers)
 	logger := consensusLogger()
+	var peer0Config *cfg.Config
 	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
@@ -589,6 +609,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
 		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
+		if i == 0 {
+			peer0Config = thisConfig
+		}
 		var privVal types.PrivValidator
 		if i < nValidators {
 			privVal = privVals[i]
@@ -605,15 +628,19 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 			privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
 		}

-		app := appFunc()
+		app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
+		if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
+			state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake and receive the app version; if we don't do this, the replay test will fail
+		}
 		app.InitChain(abci.RequestInitChain{Validators: vals})
+		// sm.SaveState(stateDB, state) // height 1's validators info already saved in LoadStateFromDBOrGenesisDoc above

 		css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
-	return css, func() {
+	return css, genDoc, peer0Config, func() {
 		for _, dir := range configRootDirs {
 			os.RemoveAll(dir)
 		}
@@ -719,3 +746,7 @@ func newPersistentKVStore() abci.Application {
 	}
 	return kvstore.NewPersistentKVStoreApplication(dir)
 }
+
+func newPersistentKVStoreWithPath(dbDir string) abci.Application {
+	return kvstore.NewPersistentKVStoreApplication(dbDir)
+}
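Several hunks above replace positional BlockID literals with keyed ones. A tiny standalone illustration of why, using a hypothetical struct rather than the real types.BlockID:

package main

import "fmt"

// blockID is a stand-in for a struct like types.BlockID.
type blockID struct {
	Hash        []byte
	PartsHeader string
}

func main() {
	// Keyed literals stay correct if fields are added or reordered, and
	// vet's composite-literal check no longer flags them.
	id := blockID{Hash: []byte{0x01}, PartsHeader: "1:ABCD"}
	fmt.Printf("%+v\n", id)
}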
diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go
index f6d183757..07e9a94af 100644
--- a/consensus/mempool_test.go
+++ b/consensus/mempool_test.go
@@ -11,13 +11,15 @@ import (

 	"github.com/tendermint/tendermint/abci/example/code"
 	abci "github.com/tendermint/tendermint/abci/types"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	mempl "github.com/tendermint/tendermint/mempool"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )

 // for testing
-func assertMempool(txn txNotifier) sm.Mempool {
-	return txn.(sm.Mempool)
+func assertMempool(txn txNotifier) mempl.Mempool {
+	return txn.(mempl.Mempool)
 }

 func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
@@ -80,14 +82,14 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
 	ensureNewRound(newRoundCh, height, round) // first round at first height
 	ensureNewEventOnChannel(newBlockCh)       // first block gets committed

-	height = height + 1 // moving to the next height
+	height++ // moving to the next height
 	round = 0

 	ensureNewRound(newRoundCh, height, round) // first round at next height
 	deliverTxsRange(cs, 0, 1)                 // we deliver txs, but dont set a proposal so we get the next round
 	ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())

-	round = round + 1 // moving to the next round
+	round++ // moving to the next round
 	ensureNewRound(newRoundCh, height, round) // wait for the next round
 	ensureNewEventOnChannel(newBlockCh)       // now we can commit the block
 }
@@ -110,7 +112,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
 	// to the counter sequence
 	app := NewCounterApplication()
 	app.serial = false
-	cs := newConsensusState(state, privVals[0], app)
+	blockDB := dbm.NewMemDB()
+	cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
+	sm.SaveState(blockDB, state)
 	height, round := cs.Height, cs.Round

 	newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@@ -133,13 +137,15 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
 func TestMempoolRmBadTx(t *testing.T) {
 	state, privVals := randGenesisState(1, false, 10)
 	app := NewCounterApplication()
-	cs := newConsensusState(state, privVals[0], app)
+	blockDB := dbm.NewMemDB()
+	cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
+	sm.SaveState(blockDB, state)

 	// increment the counter by 1
 	txBytes := make([]byte, 8)
 	binary.BigEndian.PutUint64(txBytes, uint64(0))

-	resDeliver := app.DeliverTx(txBytes)
+	resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
 	assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))

 	resCommit := app.Commit()
@@ -153,12 +159,14 @@ func TestMempoolRmBadTx(t *testing.T) {
 	// and the tx should get removed from the pool
 	err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
 		if r.GetCheckTx().Code != code.CodeTypeBadNonce {
-			t.Fatalf("expected checktx to return bad nonce, got %v", r)
+			t.Errorf("expected checktx to return bad nonce, got %v", r)
+			return
 		}
 		checkTxRespCh <- struct{}{}
 	})
 	if err != nil {
-		t.Fatalf("Error after CheckTx: %v", err)
+		t.Errorf("Error after CheckTx: %v", err)
+		return
 	}

 	// check for the tx
@@ -178,7 +186,8 @@ func TestMempoolRmBadTx(t *testing.T) {
 	case <-checkTxRespCh:
 		// success
 	case <-ticker:
-		t.Fatalf("Timed out waiting for tx to return")
+		t.Errorf("Timed out waiting for tx to return")
+		return
 	}

 	// Wait until the tx is removed
@@ -187,7 +196,8 @@ func TestMempoolRmBadTx(t *testing.T) {
 	case <-emptyMempoolCh:
 		// success
 	case <-ticker:
-		t.Fatalf("Timed out waiting for tx to be removed")
+		t.Errorf("Timed out waiting for tx to be removed")
+		return
 	}
 }

@@ -208,8 +218,8 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
 	return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
 }

-func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
-	txValue := txAsUint64(tx)
+func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
+	txValue := txAsUint64(req.Tx)
 	if app.serial && txValue != uint64(app.txCount) {
 		return abci.ResponseDeliverTx{
 			Code: code.CodeTypeBadNonce,
@@ -219,8 +229,8 @@ func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
 	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
 }

-func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
-	txValue := txAsUint64(tx)
+func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
+	txValue := txAsUint64(req.Tx)
 	if app.serial && txValue != uint64(app.mempoolTxCount) {
 		return abci.ResponseCheckTx{
 			Code: code.CodeTypeBadNonce,
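The CounterApplication changes above track the ABCI move from bare []byte arguments to request structs. A minimal application fragment under the new signatures (illustrative; echoApp is hypothetical, the abci types are the ones this diff compiles against):

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

type echoApp struct {
	abci.BaseApplication // provides default implementations
}

// Tx bytes now arrive as req.Tx instead of a bare []byte parameter.
func (echoApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	return abci.ResponseCheckTx{Code: 0, Log: fmt.Sprintf("checked %d bytes", len(req.Tx))}
}

func (echoApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	return abci.ResponseDeliverTx{Code: 0, Log: fmt.Sprintf("delivered %d bytes", len(req.Tx))}
}

func main() {
	var app abci.Application = echoApp{}
	fmt.Println(app.CheckTx(abci.RequestCheckTx{Tx: []byte("tx0")}).Log)
}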
diff --git a/consensus/reactor.go b/consensus/reactor.go
index 61001e938..6f0be14c3 100644
--- a/consensus/reactor.go
+++ b/consensus/reactor.go
@@ -26,7 +26,7 @@ const (
 	VoteChannel        = byte(0x22)
 	VoteSetBitsChannel = byte(0x23)

-	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
+	maxMsgSize = 2 * 1048576 // 2MB; NOTE/TODO: keep in sync with types.PartSet sizes.

 	blocksToContributeToBecomeGoodPeer = 10000
 	votesToContributeToBecomeGoodPeer  = 10000
@@ -117,8 +117,13 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
 	}
 	err := conR.conS.Start()
 	if err != nil {
-		conR.Logger.Error("Error starting conS", "err", err)
-		return
+		panic(fmt.Sprintf(`Failed to start consensus state: %v
+
+conS:
+%+v
+
+conR:
+%+v`, err, conR.conS, conR))
 	}
 }
@@ -156,7 +161,8 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
 	}
 }

-// AddPeer implements Reactor
+// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
+// peer.
 func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
 	if !conR.IsRunning() {
 		return
@@ -164,10 +170,8 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {

 	peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
 	if !ok {
-		// should not happen
-		panic(fmt.Sprintf("Peer %v has no state", peer))
+		panic(fmt.Sprintf("peer %v has no state", peer))
 	}
-
 	// Begin routines for this peer.
 	go conR.gossipDataRoutine(peer, peerState)
 	go conR.gossipVotesRoutine(peer, peerState)
@@ -180,14 +184,14 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
 	}
 }

-func (conR *ConsensusReactor) InitAddPeer(peer p2p.Peer) p2p.Peer {
+func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
 	// Create peerState for peer
 	peerState := NewPeerState(peer).SetLogger(conR.Logger)
 	peer.Set(types.PeerStateKey, peerState)
 	return peer
 }

-// RemovePeer implements Reactor
+// RemovePeer is a noop.
 func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 	if !conR.IsRunning() {
 		return
@@ -354,10 +358,6 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 	default:
 		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
 	}
-
-	if err != nil {
-		conR.Logger.Error("Error in Receive()", "err", err)
-	}
 }

 // SetEventBus sets event bus.
@@ -502,7 +502,7 @@ OUTER_LOOP:
 			if prs.ProposalBlockParts == nil {
 				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
 				if blockMeta == nil {
-					cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
+					panic(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
 						prs.Height, conR.conS.blockStore.Height()))
 				}
 				ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
@@ -1128,7 +1128,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
 		NOTE: This is wrong, 'round' could change.
 		e.g. if orig round is not the same as block LastCommit round.
 		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
-			cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
+			panic(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
 		}
 	*/
 	if ps.PRS.CatchupCommitRound == round {
@@ -1538,6 +1538,9 @@ func (m *BlockPartMessage) ValidateBasic() error {
 	if m.Round < 0 {
 		return errors.New("Negative Round")
 	}
+	if m.Part == nil {
+		return errors.New("block part is missing")
+	}
 	if err := m.Part.ValidateBasic(); err != nil {
 		return fmt.Errorf("Wrong Part: %v", err)
 	}
@@ -1558,6 +1561,9 @@ type VoteMessage struct {

 // ValidateBasic performs basic validation.
 func (m *VoteMessage) ValidateBasic() error {
+	if m.Vote == nil {
+		return errors.New("vote is missing")
+	}
 	return m.Vote.ValidateBasic()
 }
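The InitAddPeer to InitPeer rename above splits peer setup from gossip startup, and the two reactor tests that follow pin down the ordering. A toy model of that contract (hypothetical types, not the real p2p interfaces):

package main

import "fmt"

type peer struct{ kv map[string]interface{} }

func (p *peer) Set(k string, v interface{}) { p.kv[k] = v }
func (p *peer) Get(k string) interface{}    { return p.kv[k] }

type reactor struct{}

// InitPeer attaches per-peer state; the switch must call it first.
func (reactor) InitPeer(p *peer) *peer {
	p.Set("peerState", struct{}{})
	return p
}

// AddPeer assumes InitPeer already ran, mirroring the stricter panic above.
func (reactor) AddPeer(p *peer) {
	if p.Get("peerState") == nil {
		panic("peer has no state")
	}
	fmt.Println("gossip goroutines started")
}

func main() {
	r := reactor{}
	p := &peer{kv: map[string]interface{}{}}
	r.AddPeer(r.InitPeer(p)) // InitPeer, then AddPeer
}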
diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go
index 6cdfe9ab5..e782064ef 100644
--- a/consensus/reactor_test.go
+++ b/consensus/reactor_test.go
@@ -17,28 +17,31 @@ import (
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
-	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	dbm "github.com/tendermint/tendermint/libs/db"
+	cstypes "github.com/tendermint/tendermint/consensus/types"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
 	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/mock"
 	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 )

 //----------------------------------------------
 // in-process testnets

-func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
+func startConsensusNet(t *testing.T, css []*ConsensusState, n int) (
 	[]*ConsensusReactor,
 	[]types.Subscription,
 	[]*types.EventBus,
 ) {
-	reactors := make([]*ConsensusReactor, N)
+	reactors := make([]*ConsensusReactor, n)
 	blocksSubs := make([]types.Subscription, 0)
-	eventBuses := make([]*types.EventBus, N)
-	for i := 0; i < N; i++ {
+	eventBuses := make([]*types.EventBus, n)
+	for i := 0; i < n; i++ {
 		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
 		if err != nil {	t.Fatal(err)}*/
 		reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
@@ -51,9 +54,13 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
 		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
 		require.NoError(t, err)
 		blocksSubs = append(blocksSubs, blocksSub)
+
+		if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in handshake
+			sm.SaveState(css[i].blockExec.DB(), css[i].state)
+		}
 	}
 	// make connected switches and start all reactors
-	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
+	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
 		s.AddReactor("CONSENSUS", reactors[i])
 		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
 		return s
@@ -63,7 +70,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
 	// If we started the state machines before everyone was connected,
 	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
 	// TODO: is this still true with new pubsub?
-	for i := 0; i < N; i++ {
+	for i := 0; i < n; i++ {
 		s := reactors[i].conS.GetState()
 		reactors[i].SwitchToConsensus(s, 0)
 	}
@@ -128,7 +135,7 @@ func TestReactorWithEvidence(t *testing.T) {
 		// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)

 		blockDB := dbm.NewMemDB()
-		blockStore := bc.NewBlockStore(blockDB)
+		blockStore := store.NewBlockStore(blockDB)

 		// one for mempool, one for consensus
 		mtx := new(sync.Mutex)
@@ -136,7 +143,7 @@ func TestReactorWithEvidence(t *testing.T) {
 		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

 		// Make Mempool
-		mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
+		mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
 		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
 		if thisConfig.Consensus.WaitForTxs() {
 			mempool.EnableTxsAvailable()
@@ -230,7 +237,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

 	// send a tx
 	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
-		//t.Fatal(err)
+		t.Error(err)
 	}

 	// wait till everyone makes the first new block
@@ -239,6 +246,49 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 	}, css)
 }

+func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
+	N := 1
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	defer cleanup()
+	reactors, _, eventBuses := startConsensusNet(t, css, N)
+	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
+
+	var (
+		reactor = reactors[0]
+		peer    = mock.NewPeer(nil)
+		msg     = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
+	)
+
+	reactor.InitPeer(peer)
+
+	// simulate switch calling Receive before AddPeer
+	assert.NotPanics(t, func() {
+		reactor.Receive(StateChannel, peer, msg)
+		reactor.AddPeer(peer)
+	})
+}
+
+func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
+	N := 1
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	defer cleanup()
+	reactors, _, eventBuses := startConsensusNet(t, css, N)
+	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
+
+	var (
+		reactor = reactors[0]
+		peer    = mock.NewPeer(nil)
+		msg     = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
+	)
+
+	// we should call InitPeer here
+
+	// simulate switch calling Receive before AddPeer
+	assert.Panics(t, func() {
+		reactor.Receive(StateChannel, peer, msg)
+	})
+}
+
 // Test we record stats about votes and block parts from other peers.
 func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 	N := 4
@@ -329,7 +379,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
 func TestReactorValidatorSetChanges(t *testing.T) {
 	nPeers := 7
 	nVals := 4
-	css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
+
 	defer cleanup()
 	logger := log.TestingLogger()

@@ -583,3 +634,253 @@ func capture() {
 	count := runtime.Stack(trace, true)
 	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
 }
+
+//-------------------------------------------------------------
+// Ensure basic validation of structs is functioning
+
+func TestNewRoundStepMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName               string
+		messageHeight          int64
+		messageRound           int
+		messageStep            cstypes.RoundStepType
+		messageLastCommitRound int
+		expectErr              bool
+	}{
+		{"Valid Message", 0, 0, 0x01, 1, false},
+		{"Invalid Message", -1, 0, 0x01, 1, true},
+		{"Invalid Message", 0, -1, 0x01, 1, true},
+		{"Invalid Message", 0, 0, 0x00, 1, true},
+		{"Invalid Message", 0, 0, 0x00, 0, true},
+		{"Invalid Message", 1, 0, 0x01, 0, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			message := NewRoundStepMessage{
+				Height:          tc.messageHeight,
+				Round:           tc.messageRound,
+				Step:            tc.messageStep,
+				LastCommitRound: tc.messageLastCommitRound,
+			}
+
+			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestNewValidBlockMessageValidateBasic(t *testing.T) {
+	testBitArray := cmn.NewBitArray(1)
+	testCases := []struct {
+		testName          string
+		messageHeight     int64
+		messageRound      int
+		messageBlockParts *cmn.BitArray
+		expectErr         bool
+	}{
+		{"Valid Message", 0, 0, testBitArray, false},
+		{"Invalid Message", -1, 0, testBitArray, true},
+		{"Invalid Message", 0, -1, testBitArray, true},
+		{"Invalid Message", 0, 0, cmn.NewBitArray(0), true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			message := NewValidBlockMessage{
+				Height:     tc.messageHeight,
+				Round:      tc.messageRound,
+				BlockParts: tc.messageBlockParts,
+			}
+
+			message.BlockPartsHeader.Total = 1
+
+			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestProposalPOLMessageValidateBasic(t *testing.T) {
+	testBitArray := cmn.NewBitArray(1)
+	testCases := []struct {
+		testName                string
+		messageHeight           int64
+		messageProposalPOLRound int
+		messageProposalPOL      *cmn.BitArray
+		expectErr               bool
+	}{
+		{"Valid Message", 0, 0, testBitArray, false},
+		{"Invalid Message", -1, 0, testBitArray, true},
+		{"Invalid Message", 0, -1, testBitArray, true},
+		{"Invalid Message", 0, 0, cmn.NewBitArray(0), true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			message := ProposalPOLMessage{
+				Height:           tc.messageHeight,
+				ProposalPOLRound: tc.messageProposalPOLRound,
+				ProposalPOL:      tc.messageProposalPOL,
+			}
+
+			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBlockPartMessageValidateBasic(t *testing.T) {
+	testPart := new(types.Part)
+	testCases := []struct {
+		testName      string
+		messageHeight int64
+		messageRound  int
+		messagePart   *types.Part
+		expectErr     bool
+	}{
+		{"Valid Message", 0, 0, testPart, false},
+		{"Invalid Message", -1, 0, testPart, true},
+		{"Invalid Message", 0, -1, testPart, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			message := BlockPartMessage{
+				Height: tc.messageHeight,
+				Round:  tc.messageRound,
+				Part:   tc.messagePart,
+			}
+
+			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+
+	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
+	message.Part.Index = -1
+
+	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+}
+
+func TestHasVoteMessageValidateBasic(t *testing.T) {
+	const (
+		validSignedMsgType   types.SignedMsgType = 0x01
+		invalidSignedMsgType types.SignedMsgType = 0x03
+	)
+
+	testCases := []struct {
+		testName      string
+		messageHeight int64
+		messageRound  int
+		messageType   types.SignedMsgType
+		messageIndex  int
+		expectErr     bool
+	}{
+		{"Valid Message", 0, 0, validSignedMsgType, 0, false},
+		{"Invalid Message", -1, 0, validSignedMsgType, 0, true},
+		{"Invalid Message", 0, -1, validSignedMsgType, 0, true},
+		{"Invalid Message", 0, 0, invalidSignedMsgType, 0, true},
+		{"Invalid Message", 0, 0, validSignedMsgType, -1, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			message := HasVoteMessage{
+				Height: tc.messageHeight,
+				Round:  tc.messageRound,
+				Type:   tc.messageType,
+				Index:  tc.messageIndex,
+			}
+
+			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
+	const (
+		validSignedMsgType   types.SignedMsgType = 0x01
+		invalidSignedMsgType types.SignedMsgType = 0x03
+	)
+
+	validBlockID := types.BlockID{}
+	invalidBlockID := types.BlockID{
+		Hash: cmn.HexBytes{},
+		PartsHeader: types.PartSetHeader{
+			Total: -1,
+			Hash:  cmn.HexBytes{},
+		},
+	}
+
+	testCases := []struct {
+		testName       string
+		messageHeight  int64
+		messageRound   int
+		messageType    types.SignedMsgType
+		messageBlockID types.BlockID
+		expectErr      bool
+	}{
+		{"Valid Message", 0, 0, validSignedMsgType, validBlockID, false},
+		{"Invalid Message", -1, 0, validSignedMsgType, validBlockID, true},
+		{"Invalid Message", 0, -1, validSignedMsgType, validBlockID, true},
+		{"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, true},
+		{"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			message := VoteSetMaj23Message{
+				Height:  tc.messageHeight,
+				Round:   tc.messageRound,
+				Type:    tc.messageType,
+				BlockID: tc.messageBlockID,
+			}
+
+			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
+	const (
+		validSignedMsgType   types.SignedMsgType = 0x01
+		invalidSignedMsgType types.SignedMsgType = 0x03
+	)
+
+	validBlockID := types.BlockID{}
+	invalidBlockID := types.BlockID{
+		Hash: cmn.HexBytes{},
+		PartsHeader: types.PartSetHeader{
+			Total: -1,
+			Hash:  cmn.HexBytes{},
+		},
+	}
+	testBitArray := cmn.NewBitArray(1)
+
+	testCases := []struct {
+		testName       string
+		messageHeight  int64
+		messageRound   int
+		messageType    types.SignedMsgType
+		messageBlockID types.BlockID
+		messageVotes   *cmn.BitArray
+		expectErr      bool
+	}{
+		{"Valid Message", 0, 0, validSignedMsgType, validBlockID, testBitArray, false},
+		{"Invalid Message", -1, 0, validSignedMsgType, validBlockID, testBitArray, true},
{"Invalid Message", 0, -1, validSignedMsgType, validBlockID, testBitArray, true}, + {"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, testBitArray, true}, + {"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, testBitArray, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + message := VoteSetBitsMessage{ + Height: tc.messageHeight, + Round: tc.messageRound, + Type: tc.messageType, + // Votes: tc.messageVotes, + BlockID: tc.messageBlockID, + } + + assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/consensus/replay.go b/consensus/replay.go index a14c28c1e..8fc7a723c 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -13,10 +13,10 @@ import ( abci "github.com/tendermint/tendermint/abci/types" //auto "github.com/tendermint/tendermint/libs/autofile" - cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" - + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -141,14 +141,16 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error { var msg *TimedWALMessage dec := WALDecoder{gr} +LOOP: for { msg, err = dec.Decode() - if err == io.EOF { - break - } else if IsDataCorruptionError(err) { + switch { + case err == io.EOF: + break LOOP + case IsDataCorruptionError(err): cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) return err - } else if err != nil { + case err != nil: return err } @@ -232,6 +234,7 @@ func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { h.eventBus = eventBus } +// NBlocks returns the number of blocks applied to the state. func (h *Handshaker) NBlocks() int { return h.nBlocks } @@ -259,13 +262,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { ) // Set AppVersion on the state. - h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion) - sm.SaveState(h.stateDB, h.initialState) + if h.initialState.Version.Consensus.App != version.Protocol(res.AppVersion) { + h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion) + sm.SaveState(h.stateDB, h.initialState) + } // Replay blocks up to the latest in the blockstore. _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) if err != nil { - return fmt.Errorf("Error on replay: %v", err) + return fmt.Errorf("error on replay: %v", err) } h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", @@ -276,7 +281,8 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { return nil } -// Replay all blocks since appBlockHeight and ensure the result matches the current state. +// ReplayBlocks replays all blocks since appBlockHeight and ensures the result +// matches the current state. // Returns the final AppHash or an error. func (h *Handshaker) ReplayBlocks( state sm.State, @@ -318,11 +324,9 @@ func (h *Handshaker) ReplayBlocks( } state.Validators = types.NewValidatorSet(vals) state.NextValidators = types.NewValidatorSet(vals) - } else { + } else if len(h.genDoc.Validators) == 0 { // If validator set is not set in genesis and still empty after InitChain, exit. 
- if len(h.genDoc.Validators) == 0 { - return nil, fmt.Errorf("Validator set is nil in genesis and still empty after InitChain") - } + return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") } if res.ConsensusParams != nil { @@ -333,20 +337,22 @@ func (h *Handshaker) ReplayBlocks( } // First handle edge cases and constraints on the storeBlockHeight. - if storeBlockHeight == 0 { - return appHash, checkAppHash(state, appHash, !h.withAppStat) + switch { + case storeBlockHeight == 0: + assertAppHashEqualsOneFromState(appHash, state, !h.withAppStat) + return appHash, nil - } else if storeBlockHeight < appBlockHeight { + case storeBlockHeight < appBlockHeight: // the app should never be ahead of the store (but this is under app's control) return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight} - } else if storeBlockHeight < stateBlockHeight { + case storeBlockHeight < stateBlockHeight: // the state should never be ahead of the store (this is under tendermint's control) - cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) + panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) - } else if storeBlockHeight > stateBlockHeight+1 { + case storeBlockHeight > stateBlockHeight+1: // store should be at most one ahead of the state (this is under tendermint's control) - cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) + panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) } var err error @@ -361,18 +367,20 @@ func (h *Handshaker) ReplayBlocks( } else if appBlockHeight == storeBlockHeight { // We're good! - return appHash, checkAppHash(state, appHash, !h.withAppStat) + assertAppHashEqualsOneFromState(appHash, state, !h.withAppStat) + return appHash, nil } } else if storeBlockHeight == stateBlockHeight+1 { // We saved the block in the store but haven't updated the state, // so we'll need to replay a block using the WAL. - if appBlockHeight < stateBlockHeight { + switch { + case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) - } else if appBlockHeight == stateBlockHeight { + case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), // so replayBlock with the real app. // NOTE: We could instead use the cs.WAL on cs.Start, @@ -381,8 +389,8 @@ func (h *Handshaker) ReplayBlocks( state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) return state.AppHash, err - } else if appBlockHeight == storeBlockHeight { - // We ran Commit, but didn't save the state, so replayBlock with mock app + case appBlockHeight == storeBlockHeight: + // We ran Commit, but didn't save the state, so replayBlock with mock app. abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) if err != nil { return nil, err @@ -395,8 +403,8 @@ func (h *Handshaker) ReplayBlocks( } - cmn.PanicSanity("Should never happen") - return nil, nil + panic(fmt.Sprintf("uncovered case! 
appHeight: %d, storeHeight: %d, stateHeight: %d", + appBlockHeight, storeBlockHeight, stateBlockHeight)) } func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { @@ -419,7 +427,11 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl for i := appBlockHeight + 1; i <= finalBlock; i++ { h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB) + // Extra check to ensure the app was not changed in a way it shouldn't have. + if len(appHash) > 0 && h.withAppStat { + assertAppHashEqualsOneFromBlock(appHash, block) + } + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB) if err != nil { return nil, err } @@ -436,7 +448,8 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl appHash = state.AppHash } - return appHash, checkAppHash(state, appHash, h.withAppStat) + assertAppHashEqualsOneFromState(appHash, state, !h.withAppStat) + return appHash, nil } // ApplyBlock on the proxyApp with the last block. @@ -444,7 +457,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) - blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{}, h.withAppStat) + blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, mock.Mempool{}, sm.MockEvidencePool{}, h.withAppStat) blockExec.SetEventBus(h.eventBus) var err error @@ -458,14 +471,29 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap return state, nil } -func checkAppHash(state sm.State, appHash []byte, skip bool) error { +func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) { + if !bytes.Equal(appHash, block.AppHash) { + panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X. + +Block: %v +`, + appHash, block.AppHash, block)) + } +} + +func assertAppHashEqualsOneFromState(appHash []byte, state sm.State, skip bool) { if skip { - return nil + return } - if !bytes.Equal(state.AppHash, appHash) { - panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, state.AppHash).Error()) + if !bytes.Equal(appHash, state.AppHash) { + panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got +%X, expected %X. 
+ +State: %v + +Did you reset Tendermint without resetting your application's data?`, + appHash, state.AppHash, state)) } - return nil } //-------------------------------------------------------------------------------- @@ -493,9 +521,12 @@ type mockProxyApp struct { abciResponses *sm.ABCIResponses } -func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { +func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { r := mock.abciResponses.DeliverTx[mock.txCount] mock.txCount++ + if r == nil { // r may be nil because amino unmarshalling turns an empty ResponseDeliverTx into nil + return abci.ResponseDeliverTx{} + } return *r } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 515d19cff..e10d8e723 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -11,13 +11,14 @@ import ( "github.com/pkg/errors" - bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -230,10 +231,8 @@ func (pb *playback) replayConsoleLoop() int { fmt.Println("back takes an integer argument") } else if i > pb.count { fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) - } else { - if err := pb.replayReset(i, newStepSub); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) - } + } else if err := pb.replayReset(i, newStepSub); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) } } @@ -279,7 +278,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo dbType := dbm.DBBackendType(config.DBBackend) // Get BlockStore blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir()) - blockStore := bc.NewBlockStore(blockStoreDB) + blockStore := store.NewBlockStore(blockStoreDB) // Get State stateDB := dbm.NewDB("state", dbType, config.DBDir()) @@ -312,7 +311,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo cmn.Exit(fmt.Sprintf("Error on handshake: %v", err)) } - mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{} + mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{} blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, true) consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c2e874567..0f21a12a0 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -7,7 +7,7 @@ import ( "io" "io/ioutil" "os" - "path" + "path/filepath" "runtime" "testing" "time" @@ -15,12 +15,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "sort" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm 
"github.com/tendermint/tendermint/state" @@ -88,7 +92,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig * } } -func sendTxs(cs *ConsensusState, ctx context.Context) { +func sendTxs(ctx context.Context, cs *ConsensusState) { for i := 0; i < 256; i++ { select { case <-ctx.Done(): @@ -113,7 +117,7 @@ func TestWALCrash(t *testing.T) { 1}, {"many non-empty blocks", func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) { - go sendTxs(cs, ctx) + go sendTxs(ctx, cs) }, 3}, } @@ -138,10 +142,10 @@ LOOP: // create consensus state from a clean slate logger := log.NewNopLogger() - stateDB := dbm.NewMemDB() + blockDB := dbm.NewMemDB() + stateDB := blockDB state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) - blockDB := dbm.NewMemDB() cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB) cs.SetLogger(logger) @@ -260,15 +264,23 @@ func (w *crashingWAL) Stop() error { return w.next.Stop() } func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ -// Handshake Tests +type testSim struct { + GenesisState sm.State + Config *cfg.Config + Chain []*types.Block + Commits []*types.Commit + CleanupFunc cleanupFunc +} const ( - NUM_BLOCKS = 6 + numBlocks = 6 ) var ( - mempool = sm.MockMempool{} + mempool = mock.Mempool{} evpool = sm.MockEvidencePool{} + + sim testSim ) //--------------------------------------- @@ -279,93 +291,355 @@ var ( // 2 - save block and committed but state is behind var modes = []uint{0, 1, 2} +// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay +func TestSimulateValidatorsChange(t *testing.T) { + nPeers := 7 + nVals := 4 + css, genDoc, config, cleanup := randConsensusNetWithPeers(nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath) + sim.Config = config + sim.GenesisState, _ = sm.MakeGenesisState(genDoc) + sim.CleanupFunc = cleanup + + partSize := types.BlockPartSizeBytes + + newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) + proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + + vss := make([]*validatorStub, nPeers) + for i := 0; i < nPeers; i++ { + vss[i] = NewValidatorStub(css[i].privValidator, i) + } + height, round := css[0].Height, css[0].Round + // start the machine + startTestRound(css[0], height, round) + incrementHeight(vss...) + ensureNewRound(newRoundCh, height, 0) + ensureNewProposal(proposalCh, height, round) + rs := css[0].GetRoundState() + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + ensureNewRound(newRoundCh, height+1, 0) + + //height 2 + height++ + incrementHeight(vss...) 
+ newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) + newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) + err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil) + assert.Nil(t, err) + propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts := propBlock.MakePartSet(partSize) + blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + proposal := types.NewProposal(vss[1].Height, round, -1, blockID) + if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + ensureNewRound(newRoundCh, height+1, 0) + + //height 3 + height++ + incrementHeight(vss...) + updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) + updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) + err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil) + assert.Nil(t, err) + propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts = propBlock.MakePartSet(partSize) + blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + proposal = types.NewProposal(vss[2].Height, round, -1, blockID) + if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + ensureNewRound(newRoundCh, height+1, 0) + + //height 4 + height++ + incrementHeight(vss...) 
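+ // Height 4 (below) commits two more new validators in one block; the remove tx for the first of them is submitted only after the proposal is set, so it takes effect in a later block.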
+ newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) + newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) + err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil) + assert.Nil(t, err) + newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) + newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) + err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil) + assert.Nil(t, err) + propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts = propBlock.MakePartSet(partSize) + blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + newVss := make([]*validatorStub, nVals+1) + copy(newVss, vss[:nVals+1]) + sort.Sort(ValidatorStubsByAddress(newVss)) + selfIndex := 0 + for i, vs := range newVss { + if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + selfIndex = i + break + } + } + + proposal = types.NewProposal(vss[3].Height, round, -1, blockID) + if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + + removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) + err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil) + assert.Nil(t, err) + + rs = css[0].GetRoundState() + for i := 0; i < nVals+1; i++ { + if i == selfIndex { + continue + } + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + } + + ensureNewRound(newRoundCh, height+1, 0) + + //height 5 + height++ + incrementHeight(vss...) + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + for i := 0; i < nVals+1; i++ { + if i == selfIndex { + continue + } + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + } + ensureNewRound(newRoundCh, height+1, 0) + + //height 6 + height++ + incrementHeight(vss...) 
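+ // Height 6 (below) removes the remaining extra validator, so the recorded chain exercises both growth and shrinkage of the validator set during replay.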
+ removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) + err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil) + assert.Nil(t, err) + propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts = propBlock.MakePartSet(partSize) + blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + newVss = make([]*validatorStub, nVals+3) + copy(newVss, vss[:nVals+3]) + sort.Sort(ValidatorStubsByAddress(newVss)) + for i, vs := range newVss { + if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + selfIndex = i + break + } + } + proposal = types.NewProposal(vss[1].Height, round, -1, blockID) + if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + for i := 0; i < nVals+3; i++ { + if i == selfIndex { + continue + } + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + } + ensureNewRound(newRoundCh, height+1, 0) + + sim.Chain = make([]*types.Block, 0) + sim.Commits = make([]*types.Commit, 0) + for i := 1; i <= numBlocks; i++ { + sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) + sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) + } +} + // Sync from scratch func TestHandshakeReplayAll(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, 0, m) + for _, m := range modes { + testHandshakeReplay(t, config, 0, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, 0, m, true) } } // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, 1, m) + for _, m := range modes { + testHandshakeReplay(t, config, 1, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, 1, m, true) } } // Sync from lagging by one func TestHandshakeReplayOne(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, NUM_BLOCKS-1, m) + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks-1, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks-1, m, true) } } // Sync from caught up func TestHandshakeReplayNone(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, NUM_BLOCKS, m) + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks, m, true) } } +// TestMockProxyApp checks that mockProxyApp does not panic when the app returns ABCIResponses containing an empty ResponseDeliverTx +func TestMockProxyApp(t *testing.T) { + sim.CleanupFunc() // clean up the test env created in TestSimulateValidatorsChange + logger := log.TestingLogger() + var validTxs, invalidTxs = 0, 0 + txIndex := 0 + + assert.NotPanics(t, func() { + abciResWithEmptyDeliverTx := new(sm.ABCIResponses) + 
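+ // An ABCIResponses holding one empty ResponseDeliverTx is round-tripped through amino below; the empty element comes back as nil, which is exactly the case mockProxyApp.DeliverTx must tolerate.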
abciResWithEmptyDeliverTx.DeliverTx = make([]*abci.ResponseDeliverTx, 0) + abciResWithEmptyDeliverTx.DeliverTx = append(abciResWithEmptyDeliverTx.DeliverTx, &abci.ResponseDeliverTx{}) + + // the same marshalling happens in saveABCIResponses: + bytes := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx) + loadedAbciRes := new(sm.ABCIResponses) + + // the same unmarshalling happens in sm.LoadABCIResponses + err := cdc.UnmarshalBinaryBare(bytes, loadedAbciRes) + require.NoError(t, err) + + mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes) + + abciRes := new(sm.ABCIResponses) + abciRes.DeliverTx = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTx)) + // Execute transactions and get hash. + proxyCb := func(req *abci.Request, res *abci.Response) { + if r, ok := res.Value.(*abci.Response_DeliverTx); ok { + // TODO: make use of res.Log + // TODO: make use of this info + // Blocks may include invalid txs. + txRes := r.DeliverTx + if txRes.Code == abci.CodeTypeOK { + validTxs++ + } else { + logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log) + invalidTxs++ + } + abciRes.DeliverTx[txIndex] = txRes + txIndex++ + } + } + mock.SetResponseCallback(proxyCb) + + someTx := []byte("tx") + mock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx}) + }) + assert.True(t, validTxs == 1) + assert.True(t, invalidTxs == 0) +} + func tempWALWithData(data []byte) string { walFile, err := ioutil.TempFile("", "wal") if err != nil { - panic(fmt.Errorf("failed to create temp WAL file: %v", err)) + panic(fmt.Sprintf("failed to create temp WAL file: %v", err)) } _, err = walFile.Write(data) if err != nil { - panic(fmt.Errorf("failed to write to temp WAL file: %v", err)) + panic(fmt.Sprintf("failed to write to temp WAL file: %v", err)) } if err := walFile.Close(); err != nil { - panic(fmt.Errorf("failed to close temp WAL file: %v", err)) + panic(fmt.Sprintf("failed to close temp WAL file: %v", err)) } return walFile.Name() } // Make some blocks. Start a fresh app and apply nBlocks blocks. 
Then restart the app and sync it up with the remaining blocks -func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) { - walBody, err := WALWithNBlocks(t, NUM_BLOCKS) - require.NoError(t, err) - walFile := tempWALWithData(walBody) - config.Consensus.SetWalFile(walFile) - - privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) +func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) { + var chain []*types.Block + var commits []*types.Commit + var store *mockBlockStore + var stateDB dbm.DB + var genesisState sm.State + if testValidatorsChange { + testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + defer os.RemoveAll(testConfig.RootDir) + stateDB = dbm.NewMemDB() + genesisState = sim.GenesisState + config = sim.Config + chain = sim.Chain + commits = sim.Commits + store = newMockBlockStore(config, genesisState.ConsensusParams) + } else { // test single node + testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + defer os.RemoveAll(testConfig.RootDir) + walBody, err := WALWithNBlocks(t, numBlocks) + require.NoError(t, err) + walFile := tempWALWithData(walBody) + config.Consensus.SetWalFile(walFile) - wal, err := NewWAL(walFile) - require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() - require.NoError(t, err) - defer wal.Stop() + privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - chain, commits, err := makeBlockchainFromWAL(wal) - require.NoError(t, err) + wal, err := NewWAL(walFile) + require.NoError(t, err) + wal.SetLogger(log.TestingLogger()) + err = wal.Start() + require.NoError(t, err) + defer wal.Stop() - stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) + chain, commits, err = makeBlockchainFromWAL(wal) + require.NoError(t, err) + stateDB, genesisState, store = stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) + } store.chain = chain store.commits = commits + state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(config, stateDB, state, chain, mode) + state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode) latestAppHash := state.AppHash // make a new client creator - kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2")) + kvstoreApp := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode))) + clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp) if nBlocks > 0 { // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) - stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) - buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode) + stateDB1 := dbm.NewMemDB() + sm.SaveState(stateDB1, genesisState) + buildAppStateFromChain(proxyApp, stateDB1, genesisState, chain, nBlocks, mode) } // now start the app using the handshake - it should sync @@ -391,8 +665,8 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin t.Fatalf("Expected app hashes to match after handshake/replay. 
got %X, expected %X", res.LastBlockAppHash, latestAppHash) } - expectedBlocksToSync := NUM_BLOCKS - nBlocks - if nBlocks == NUM_BLOCKS && mode > 0 { + expectedBlocksToSync := numBlocks - nBlocks + if nBlocks == numBlocks && mode > 0 { expectedBlocksToSync++ } else if nBlocks > 0 && mode == 1 { expectedBlocksToSync++ @@ -407,7 +681,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap testPartSize := types.BlockPartSizeBytes blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, true) - blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()} + blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()} newState, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) @@ -423,12 +697,14 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, } defer proxyApp.Stop() + state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) } + sm.SaveState(stateDB, state) //save height 1's validatorsInfo switch mode { case 0: @@ -451,21 +727,23 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, } -func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State { +func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, nBlocks int, mode uint) sm.State { // run the whole chain against this client to build up the tendermint state - clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1"))) + clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))) proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { panic(err) } defer proxyApp.Stop() + state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) } + sm.SaveState(stateDB, state) //save height 1's validatorsInfo switch mode { case 0: @@ -489,28 +767,145 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c return state } +func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { + // 1. Initialize tendermint and commit 3 blocks with the following app hashes: + // - 0x01 + // - 0x02 + // - 0x03 + config := ResetConfig("handshake_test_") + defer os.RemoveAll(config.RootDir) + privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + const appVersion = 0x0 + stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion) + genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + state.LastValidators = state.Validators.Copy() + // mode = 0 for committing all the blocks + blocks := makeBlocks(3, &state, privVal) + store.chain = blocks + + // 2. 
Tendermint must panic if app returns wrong hash for the first block + // - RANDOM HASH + // - 0x02 + // - 0x03 + { + app := &badApp{numBlocks: 3, allHashesAreWrong: true} + clientCreator := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(clientCreator) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() + + assert.Panics(t, func() { + h := NewHandshaker(stateDB, state, store, genDoc, true) + h.Handshake(proxyApp) + }) + } + + // 3. Tendermint must panic if app returns wrong hash for the last block + // - 0x01 + // - 0x02 + // - RANDOM HASH + { + app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} + clientCreator := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(clientCreator) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() + + assert.Panics(t, func() { + h := NewHandshaker(stateDB, state, store, genDoc, true) + h.Handshake(proxyApp) + }) + } +} + +func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block { + blocks := make([]*types.Block, 0) + + var ( + prevBlock *types.Block + prevBlockMeta *types.BlockMeta + ) + + appHeight := byte(0x01) + for i := 0; i < n; i++ { + height := int64(i + 1) + + block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height) + blocks = append(blocks, block) + + prevBlock = block + prevBlockMeta = types.NewBlockMeta(block, parts) + + // update state + state.AppHash = []byte{appHeight} + appHeight++ + state.LastBlockHeight = height + } + + return blocks +} + +func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, + privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { + + lastCommit := types.NewCommit(types.BlockID{}, nil) + if height > 1 { + vote, _ := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVal, lastBlock.Header.ChainID) + voteCommitSig := vote.CommitSig() + lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig}) + } + + return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) +} + +type badApp struct { + abci.BaseApplication + numBlocks byte + height byte + allHashesAreWrong bool + onlyLastHashIsWrong bool +} + +func (app *badApp) Commit() abci.ResponseCommit { + app.height++ + if app.onlyLastHashIsWrong { + if app.height == app.numBlocks { + return abci.ResponseCommit{Data: cmn.RandBytes(8)} + } + return abci.ResponseCommit{Data: []byte{app.height}} + } else if app.allHashesAreWrong { + return abci.ResponseCommit{Data: cmn.RandBytes(8)} + } + + panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") +} + //-------------------------- // utils for making blocks func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { + var height int64 + // Search for height marker - gr, found, err := wal.SearchForEndHeight(0, &WALSearchOptions{}) + gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{}) if err != nil { return nil, nil, err } if !found { - return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1) + return nil, nil, fmt.Errorf("WAL does not contain height %d", height) } defer gr.Close() // nolint: errcheck // log.Notice("Build a blockchain by reading from the WAL") - var blocks []*types.Block - var commits []*types.Commit - - var thisBlockParts *types.PartSet - var thisBlockCommit *types.Commit - var height int64 + var ( + blocks []*types.Block + commits []*types.Commit + thisBlockParts *types.PartSet + 
thisBlockCommit *types.Commit + ) dec := NewWALDecoder(gr) for { @@ -602,7 +997,8 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version. stateDB := dbm.NewMemDB() state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion - store := NewMockBlockStore(config, state.ConsensusParams) + store := newMockBlockStore(config, state.ConsensusParams) + sm.SaveState(stateDB, state) return stateDB, state, store } @@ -617,7 +1013,7 @@ type mockBlockStore struct { } // TODO: NewBlockStore(db.NewMemDB) ... -func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { +func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { return &mockBlockStore{config, params, nil, nil} } @@ -626,7 +1022,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] return &types.BlockMeta{ - BlockID: types.BlockID{block.Hash(), block.MakePartSet(types.BlockPartSizeBytes).Header()}, + BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, Header: block.Header, } } @@ -640,15 +1036,16 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.commits[height-1] } -//---------------------------------------- +//--------------------------------------- +// Test handshake/init chain -func TestInitChainUpdateValidators(t *testing.T) { +func TestHandshakeUpdatesValidators(t *testing.T) { val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := proxy.NewLocalClientCreator(app) - config := ResetConfig("proxy_test_") + config := ResetConfig("handshake_test_") defer os.RemoveAll(config.RootDir) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0) diff --git a/consensus/state.go b/consensus/state.go index 0422890b6..8a46f535d 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -484,18 +484,9 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.PrecommitType, state.LastValidators) - for _, precommit := range seenCommit.Precommits { - if precommit == nil { - continue - } - added, err := lastPrecommits.AddVote(seenCommit.ToVote(precommit)) - if !added || err != nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) - } - } + lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { - cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj") + panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") } cs.LastCommit = lastPrecommits } @@ -504,13 +495,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { // The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. 
func (cs *ConsensusState) updateToState(state sm.State) { if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { - cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v", + panic(fmt.Sprintf("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { // This might happen when someone else is mutating cs.state. // Someone forgot to pass in state.Copy() somewhere?! - cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", cs.state.LastBlockHeight+1, cs.Height)) } @@ -530,7 +521,7 @@ func (cs *ConsensusState) updateToState(state sm.State) { lastPrecommits := (*types.VoteSet)(nil) if cs.CommitRound > -1 && cs.Votes != nil { if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() { - cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3") + panic("updateToState(state) called but last Precommit round didn't have +2/3") } lastPrecommits = cs.Votes.Precommits(cs.CommitRound) } @@ -699,13 +690,13 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { cs.statsMsgQueue <- mi } - if err == ErrAddingVote { - // TODO: punish peer - // We probably don't want to stop the peer here. The vote does not - // necessarily comes from a malicious peer but can be just broadcasted by - // a typical peer. - // https://github.com/tendermint/tendermint/issues/1281 - } + // if err == ErrAddingVote { + // TODO: punish peer + // We probably don't want to stop the peer here. The vote does not + // necessarily comes from a malicious peer but can be just broadcasted by + // a typical peer. + // https://github.com/tendermint/tendermint/issues/1281 + // } // NOTE: the vote is broadcast to peers by the reactor listening // for vote events @@ -718,7 +709,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { return } - if err != nil { + if err != nil { // nolint:staticcheck // Causes TestReactorValidatorSetChanges to timeout // https://github.com/tendermint/tendermint/issues/3406 // cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, @@ -882,7 +873,7 @@ func (cs *ConsensusState) enterPropose(height int64, round int) { } // if not a validator, we're done - address := cs.privValidator.GetAddress() + address := cs.privValidator.GetPubKey().Address() if !cs.Validators.HasAddress(address) { logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators) return @@ -933,10 +924,8 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { } cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) - } else { - if !cs.replayMode { - cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) - } + } else if !cs.replayMode { + cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) } } @@ -963,20 +952,21 @@ func (cs *ConsensusState) isProposalComplete() bool { // NOTE: keep it side-effect free for clarity. func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { var commit *types.Commit - if cs.Height == 1 { + switch { + case cs.Height == 1: // We're creating a proposal for the first block. // The commit is empty, but not nil. 
commit = types.NewCommit(types.BlockID{}, nil) - } else if cs.LastCommit.HasTwoThirdsMajority() { + case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() - } else { + default: // This shouldn't happen. cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.") return } - proposerAddr := cs.privValidator.GetAddress() + proposerAddr := cs.privValidator.GetPubKey().Address() return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr) } @@ -1047,7 +1037,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { return } if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + panic(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) } logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) @@ -1103,7 +1093,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // the latest POLRound should be this round. polRound, _ := cs.Votes.POLInfo() if polRound < round { - cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) + panic(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) } // +2/3 prevoted nil. Unlock and precommit nil. @@ -1137,7 +1127,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + panic(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock @@ -1175,7 +1165,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { return } if !cs.Votes.Precommits(round).HasTwoThirdsAny() { - cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) + panic(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) } logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) @@ -1214,7 +1204,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() if !ok { - cmn.PanicSanity("RunActionCommit() expects +2/3 precommits") + panic("RunActionCommit() expects +2/3 precommits") } // The Locked* fields no longer matter. @@ -1236,9 +1226,10 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) - } else { - // We just need to keep waiting. } + // else { + // We just need to keep waiting. 
+ // } } } @@ -1247,7 +1238,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) { logger := cs.Logger.With("height", height) if cs.Height != height { - cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) + panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() @@ -1277,16 +1268,16 @@ func (cs *ConsensusState) finalizeCommit(height int64) { block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts if !ok { - cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) + panic("Cannot finalizeCommit, commit does not have two thirds majority") } if !blockParts.HasHeader(blockID.PartsHeader) { - cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) + panic("Expected ProposalBlockParts header to be commit header") } if !block.HashesTo(blockID.Hash) { - cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) + panic("Cannot finalizeCommit, ProposalBlock does not hash to commit hash") } if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { - cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) + panic(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) } cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs), @@ -1519,7 +1510,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err if err == ErrVoteHeightMismatch { return added, err } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { - addr := cs.privValidator.GetAddress() + addr := cs.privValidator.GetPubKey().Address() if bytes.Equal(vote.ValidatorAddress, addr) { cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type) return added, err @@ -1527,9 +1518,11 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence) return added, err } else { - // Probably an invalid signature / Bad peer. - // Seems this can also err sometimes with "Unexpected step" - perhaps not from a bad peer ? - cs.Logger.Error("Error attempting to add vote", "err", err) + // Either + // 1) bad peer OR + // 2) not a bad peer? this can also err sometimes with "Unexpected step" OR + // 3) tmkms used with multiple validators connecting to a single tmkms instance (https://github.com/tendermint/tendermint/issues/3839). 
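+ // None of these cases necessarily implicates a faulty peer, hence the downgrade from Error to Info below.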
+ cs.Logger.Info("Error attempting to add vote", "err", err) return added, ErrAddingVote } } @@ -1638,17 +1631,18 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } // If +2/3 prevotes for *anything* for future round: - if cs.Round < vote.Round && prevotes.HasTwoThirdsAny() { + switch { + case cs.Round < vote.Round && prevotes.HasTwoThirdsAny(): // Round-skip if there is any 2/3+ of votes ahead of us cs.enterNewRound(height, vote.Round) - } else if cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step { // current round + case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round blockID, ok := prevotes.TwoThirdsMajority() if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) { cs.enterPrecommit(height, vote.Round) } else if prevotes.HasTwoThirdsAny() { cs.enterPrevoteWait(height, vote.Round) } - } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { + case cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round: // If the proposal is now complete, enter prevote of cs.Round. if cs.isProposalComplete() { cs.enterPrevote(height, cs.Round) @@ -1684,7 +1678,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } default: - panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. + panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-amino should prevent this. } return @@ -1694,7 +1688,7 @@ func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, heade // Flush the WAL. Otherwise, we may not recompute the same vote to sign, and the privValidator will refuse to sign anything. cs.wal.FlushAndSync() - addr := cs.privValidator.GetAddress() + addr := cs.privValidator.GetPubKey().Address() valIndex, _ := cs.Validators.GetByAddress(addr) vote := &types.Vote{ @@ -1730,7 +1724,7 @@ func (cs *ConsensusState) voteTime() time.Time { // sign the vote and publish on internalMsgQueue func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { // if we don't have a key or we're not in the validator set, do nothing - if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { + if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) { return nil } vote, err := cs.signVote(type_, hash, header) diff --git a/consensus/state_test.go b/consensus/state_test.go index fc1e3e949..8409f2235 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -181,7 +181,7 @@ func TestStateBadProposal(t *testing.T) { propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2) // make the second validator the proposer by incrementing round - round = round + 1 + round++ incrementRound(vss[1:]...) 
// make the block bad by tampering with statehash @@ -192,7 +192,7 @@ func TestStateBadProposal(t *testing.T) { stateHash[0] = byte((stateHash[0] + 1) % 255) propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{propBlock.Hash(), propBlockParts.Header()} + blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} proposal := types.NewProposal(vs2.Height, round, -1, blockID) if err := vs2.SignProposal(config.ChainID(), proposal); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -239,7 +239,7 @@ func TestStateFullRound1(t *testing.T) { cs.SetEventBus(eventBus) eventBus.Start() - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) @@ -267,7 +267,7 @@ func TestStateFullRoundNil(t *testing.T) { cs, vss := randConsensusState(1) height, round := cs.Height, cs.Round - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -286,7 +286,7 @@ func TestStateFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote @@ -330,7 +330,7 @@ func TestStateLockNoPOL(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -370,11 +370,11 @@ func TestStateLockNoPOL(t *testing.T) { // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) /// - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 1") /* @@ -384,7 +384,7 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) // now we're on a new round and not the proposer, so wait for timeout - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) rs := cs1.GetRoundState() @@ -403,7 +403,7 @@ func TestStateLockNoPOL(t *testing.T) { // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. 
then wait for precommit - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // precommit // the proposed block should still be locked and our precommit added @@ -416,9 +416,9 @@ func TestStateLockNoPOL(t *testing.T) { // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round = round + 1 // entering new round + round++ // entering new round ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 2") /* @@ -441,7 +441,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // precommit validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal @@ -449,7 +449,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height ensurePrecommit(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) cs2, _ := randConsensusState(2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block @@ -460,7 +460,7 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) - round = round + 1 // entering new round + round++ // entering new round ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 3") /* @@ -482,7 +482,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal @@ -542,9 +542,9 @@ func TestStateLockPOLRelock(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round = round + 1 // moving to the next round + round++ // moving to the next round //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) @@ -621,8 +621,6 @@ func TestStateLockPOLUnlock(t *testing.T) { // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) - rs = cs1.GetRoundState() - // add precommits from the rest signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) @@ -632,12 +630,12 @@ func TestStateLockPOLUnlock(t *testing.T) { propBlockParts := propBlock.MakePartSet(partSize) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) rs = cs1.GetRoundState() lockedBlockHash := rs.LockedBlock.Hash() incrementRound(vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 1") @@ -710,7 +708,7 @@ func TestStateLockPOLSafety1(t *testing.T) { // cs1 precommit nil ensurePrecommit(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) t.Log("### ONTO ROUND 1") @@ -720,7 +718,7 @@ func TestStateLockPOLSafety1(t *testing.T) { incrementRound(vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) //XXX: this isnt guaranteed to get there before the timeoutPropose ... @@ -754,10 +752,10 @@ func TestStateLockPOLSafety1(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -767,7 +765,7 @@ func TestStateLockPOLSafety1(t *testing.T) { */ // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) // finish prevote ensurePrevote(voteCh, height, round) @@ -811,7 +809,7 @@ func TestStateLockPOLSafety2(t *testing.T) { _, propBlock0 := decideProposal(cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) - propBlockID0 := types.BlockID{propBlockHash0, propBlockParts0.Header()} + propBlockID0 := types.BlockID{Hash: propBlockHash0, PartsHeader: propBlockParts0.Header()} // the others sign a polka but we don't see it prevotes := signVotes(types.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) @@ -823,7 +821,7 @@ func TestStateLockPOLSafety2(t *testing.T) { incrementRound(vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round t.Log("### ONTO Round 1") // jump in at round 1 startTestRound(cs1, height, round) @@ -850,9 +848,9 @@ func TestStateLockPOLSafety2(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - ensureNewTimeout(timeoutWaitCh, height, round, 
cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round = round + 1 // moving to the next round + round++ // moving to the next round // in round 2 we see the polkad block from round 0 newProp := types.NewProposal(height, round, 0, propBlockID0) if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { @@ -919,17 +917,17 @@ func TestProposeValidBlock(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 2") // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash) @@ -947,14 +945,14 @@ func TestProposeValidBlock(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - round = round + 2 // moving to the next round + round += 2 // moving to the next round ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 3") - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -1004,7 +1002,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { // vs3 send prevote nil signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // we should have precommitted @@ -1046,13 +1044,13 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { voteCh := subscribeToVoter(cs1, addr) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - round = round + 1 // move to round in which P0 is not proposer + round++ // move to round in which P0 is not proposer incrementRound(vs2, vs3, vs4) startTestRound(cs1, cs1.Height, round) ensureNewRound(newRoundCh, height, round) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) @@ -1065,7 +1063,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) @@ -1099,7 +1097,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, 
types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) } @@ -1125,13 +1123,13 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { incrementRound(vss[1:]...) signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) @@ -1159,15 +1157,15 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { incrementRound(vss[1:]...) signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) } @@ -1191,7 +1189,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { incrementRound(vss[1:]...) 
signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) @@ -1317,8 +1315,6 @@ func TestStartNextHeightCorrectly(t *testing.T) { // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) - rs = cs1.GetRoundState() - // add precommits signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) @@ -1332,7 +1328,7 @@ func TestStartNextHeightCorrectly(t *testing.T) { cs1.txNotifier.(*fakeTxNotifier).Notify() - ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds()) rs = cs1.GetRoundState() assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round") } @@ -1370,17 +1366,11 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) - rs = cs1.GetRoundState() - // add precommits signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) - time.Sleep(5 * time.Millisecond) signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4) - rs = cs1.GetRoundState() - assert.True(t, rs.TriggeredTimeoutPrecommit) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) prop, propBlock := decideProposal(cs1, vs2, height+1, 0) @@ -1519,9 +1509,9 @@ func TestStateHalt1(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round = round + 1 // moving to the next round + round++ // moving to the next round ensureNewRound(newRoundCh, height, round) rs = cs1.GetRoundState() @@ -1627,3 +1617,12 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa } return sub.Out() } + +// subscribeUnBuffered subscribes the test client to the given query and returns a channel with cap = 0.
+func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { + sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + } + return sub.Out() +} diff --git a/consensus/types/wire.go b/consensus/types/codec.go similarity index 100% rename from consensus/types/wire.go rename to consensus/types/codec.go diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index eee013eea..35c9a486d 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -6,7 +6,6 @@ import ( "strings" "sync" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -83,7 +82,7 @@ func (hvs *HeightVoteSet) SetRound(round int) { hvs.mtx.Lock() defer hvs.mtx.Unlock() if hvs.round != 0 && (round < hvs.round+1) { - cmn.PanicSanity("SetRound() must increment hvs.round") + panic("SetRound() must increment hvs.round") } for r := hvs.round + 1; r <= round; r++ { if _, ok := hvs.roundVoteSets[r]; ok { @@ -96,7 +95,7 @@ func (hvs *HeightVoteSet) SetRound(round int) { func (hvs *HeightVoteSet) addRound(round int) { if _, ok := hvs.roundVoteSets[round]; ok { - cmn.PanicSanity("addRound() for an existing round") + panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) @@ -169,8 +168,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *type case types.PrecommitType: return rvs.Precommits default: - cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_)) - return nil + panic(fmt.Sprintf("Unexpected vote type %X", type_)) } } diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 42b5333a1..f45492aa4 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -62,7 +62,7 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali Round: round, Timestamp: tmtime.Now(), Type: types.PrecommitType, - BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}}, + BlockID: types.BlockID{Hash: []byte("fakehash"), PartsHeader: types.PartSetHeader{}}, } chainID := config.ChainID() err := privVal.SignVote(chainID, vote) diff --git a/consensus/wal.go b/consensus/wal.go index c63c6b940..8005fc6b1 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -20,7 +20,7 @@ import ( const ( // must be greater than types.BlockPartSizeBytes + a few bytes - maxMsgSizeBytes = 1024 * 1024 // 1MB + maxMsgSizeBytes = 2 * 1024 * 1024 + 200 // TODO: temp bugfix, will follow up tendermint's fix // how often the WAL should be sync'd during period sync'ing walDefaultFlushInterval = 2 * time.Second diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index e96e8c9a2..af3a6f2a9 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -10,15 +10,17 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/abci/example/kvstore" - bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/privval" 
"github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -45,14 +47,16 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { if err != nil { return errors.Wrap(err, "failed to read genesis file") } - stateDB := db.NewMemDB() blockStoreDB := db.NewMemDB() + stateDB := blockStoreDB state, err := sm.MakeGenesisState(genDoc) if err != nil { return errors.Wrap(err, "failed to make genesis state") } state.Version.Consensus.App = kvstore.ProtocolVersion - blockStore := bc.NewBlockStore(blockStoreDB) + sm.SaveState(stateDB, state) + blockStore := store.NewBlockStore(blockStoreDB) + proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { @@ -66,7 +70,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { return errors.Wrap(err, "failed to start event bus") } defer eventBus.Stop() - mempool := sm.MockMempool{} + mempool := mock.Mempool{} evpool := sm.MockEvidencePool{} blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, true) consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index bc60838d5..8947608ae 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -54,7 +54,7 @@ func (privKey PrivKeyEd25519) Bytes() []byte { // incorrect signature. func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) { signatureBytes := ed25519.Sign(privKey[:], msg) - return signatureBytes[:], nil + return signatureBytes, nil } // PubKey gets the corresponding public key from the private key. @@ -100,7 +100,7 @@ func GenPrivKey() PrivKeyEd25519 { // genPrivKey generates a new ed25519 private key using the provided reader. func genPrivKey(rand io.Reader) PrivKeyEd25519 { seed := make([]byte, 32) - _, err := io.ReadFull(rand, seed[:]) + _, err := io.ReadFull(rand, seed) if err != nil { panic(err) } diff --git a/crypto/internal/benchmarking/bench.go b/crypto/internal/benchmarking/bench.go index c988de48e..43ab312f0 100644 --- a/crypto/internal/benchmarking/bench.go +++ b/crypto/internal/benchmarking/bench.go @@ -24,10 +24,10 @@ func (zeroReader) Read(buf []byte) (int, error) { // BenchmarkKeyGeneration benchmarks the given key generation algorithm using // a dummy reader. 
-func BenchmarkKeyGeneration(b *testing.B, GenerateKey func(reader io.Reader) crypto.PrivKey) { +func BenchmarkKeyGeneration(b *testing.B, generateKey func(reader io.Reader) crypto.PrivKey) { var zero zeroReader for i := 0; i < b.N; i++ { - GenerateKey(zero) + generateKey(zero) } } diff --git a/crypto/merkle/wire.go b/crypto/merkle/codec.go similarity index 100% rename from crypto/merkle/wire.go rename to crypto/merkle/codec.go diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 5e2a3ab12..ad101d94d 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -3,7 +3,7 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) //---------------------------------------- @@ -44,11 +44,11 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er key := op.GetKey() if len(key) != 0 { if len(keys) == 0 { - return cmn.NewError("Key path has insufficient # of parts: expected no more keys but got %+v", string(key)) + return errors.Errorf("Key path has insufficient # of parts: expected no more keys but got %+v", string(key)) } lastKey := keys[len(keys)-1] if !bytes.Equal(lastKey, key) { - return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) + return errors.Errorf("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) } keys = keys[:len(keys)-1] } @@ -58,10 +58,10 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er } } if !bytes.Equal(root, args[0]) { - return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0]) + return errors.Errorf("Calculated root hash is invalid: expected %+v but got %+v", root, args[0]) } if len(keys) != 0 { - return cmn.NewError("Keypath not consumed all") + return errors.New("Keypath not consumed all") } return nil } @@ -92,7 +92,7 @@ func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { decoder := prt.decoders[pop.Type] if decoder == nil { - return nil, cmn.NewError("unrecognized proof type %v", pop.Type) + return nil, errors.Errorf("unrecognized proof type %v", pop.Type) } return decoder(pop) } @@ -102,7 +102,7 @@ func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) { for _, pop := range proof.Ops { operator, err := prt.Decode(pop) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding a proof operator") + return nil, errors.Wrap(err, "decoding a proof operator") } poz = append(poz, operator) } @@ -122,7 +122,7 @@ func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) { poz, err := prt.DecodeProof(proof) if err != nil { - return cmn.ErrorWrap(err, "decoding proof") + return errors.Wrap(err, "decoding proof") } return poz.Verify(root, keypath, args) } diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go index aec93e826..7ea67853b 100644 --- a/crypto/merkle/proof_key_path.go +++ b/crypto/merkle/proof_key_path.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) /* @@ -87,7 +87,7 @@ func (pth KeyPath) String() string { // Each key must use a known encoding. 
func KeyPathToKeys(path string) (keys [][]byte, err error) { if path == "" || path[0] != '/' { - return nil, cmn.NewError("key path string must start with a forward slash '/'") + return nil, errors.New("key path string must start with a forward slash '/'") } parts := strings.Split(path[1:], "/") keys = make([][]byte, len(parts)) @@ -96,13 +96,13 @@ func KeyPathToKeys(path string) (keys [][]byte, err error) { hexPart := part[2:] key, err := hex.DecodeString(hexPart) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part) + return nil, errors.Wrapf(err, "decoding hex-encoded part #%d: /%s", i, part) } keys[i] = key } else { key, err := url.PathUnescape(part) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part) + return nil, errors.Wrapf(err, "decoding url-encoded part #%d: /%s", i, part) } keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... } diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go index 247921ad5..2c89bb5fd 100644 --- a/crypto/merkle/proof_simple_value.go +++ b/crypto/merkle/proof_simple_value.go @@ -4,8 +4,9 @@ import ( "bytes" "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" - cmn "github.com/tendermint/tendermint/libs/common" ) const ProofOpSimpleValue = "simple:v" @@ -39,12 +40,12 @@ func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp { func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { if pop.Type != ProofOpSimpleValue { - return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) + return nil, errors.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) } var op SimpleValueOp // a bit strange as we'll discard this, but it works. err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp") } return NewSimpleValueOp(pop.Key, op.Proof), nil } @@ -64,7 +65,7 @@ func (op SimpleValueOp) String() string { func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { if len(args) != 1 { - return nil, cmn.NewError("expected 1 arg, got %v", len(args)) + return nil, errors.Errorf("expected 1 arg, got %v", len(args)) } value := args[0] hasher := tmhash.New() @@ -78,7 +79,7 @@ func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { kvhash := leafHash(bz.Bytes()) if !bytes.Equal(kvhash, op.Proof.LeafHash) { - return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) + return nil, errors.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) } return [][]byte{ diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index 4de3246f1..4dc916ac9 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -3,9 +3,9 @@ package merkle import ( "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tendermint/libs/common" ) const ProofOpDomino = "test:domino" @@ -34,7 +34,7 @@ func DominoOpDecoder(pop ProofOp) (ProofOperator, error) { var op DominoOp // a bit strange as we'll discard this, but it works. 
err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op) if err != nil { - return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp") } return NewDominoOp(string(pop.Key), op.Input, op.Output), nil } @@ -50,10 +50,10 @@ func (dop DominoOp) ProofOp() ProofOp { func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) { if len(input) != 1 { - return nil, cmn.NewError("Expected input of length 1") + return nil, errors.New("Expected input of length 1") } if string(input[0]) != dop.Input { - return nil, cmn.NewError("Expected input %v, got %v", + return nil, errors.Errorf("Expected input %v, got %v", dop.Input, string(input[0])) } return [][]byte{[]byte(dop.Output)}, nil diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index f01dcdca1..d3be5d7ec 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -162,11 +162,12 @@ func (spn *SimpleProofNode) FlattenAunts() [][]byte { // Nonrecursive impl. innerHashes := [][]byte{} for spn != nil { - if spn.Left != nil { + switch { + case spn.Left != nil: innerHashes = append(innerHashes, spn.Left.Hash) - } else if spn.Right != nil { + case spn.Right != nil: innerHashes = append(innerHashes, spn.Right.Hash) - } else { + default: break } spn = spn.Parent diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 5de514b51..03dc9d9d1 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -20,6 +20,77 @@ func SimpleHashFromByteSlices(items [][]byte) []byte { } } +// SimpleHashFromByteSlicesIterative is an iterative alternative to +// SimpleHashFromByteSlices motivated by potential performance improvements. +// (#2611) had suggested that an iterative version of +// SimpleHashFromByteSlices would be faster, presumably because +// we can envision some overhead accumulating from stack +// frames and function calls. Additionally, a recursive algorithm risks +// hitting the stack limit and causing a stack overflow should the tree +// be too large. +// +// Provided here is an iterative alternative, a simple test to assert +// correctness and a benchmark. On the performance side, there appears to +// be no overall difference: +// +// BenchmarkSimpleHashAlternatives/recursive-4 20000 77677 ns/op +// BenchmarkSimpleHashAlternatives/iterative-4 20000 76802 ns/op +// +// On the surface it might seem that the additional overhead is due to +// the different allocation patterns of the implementations. The recursive +// version uses a single [][]byte slice which it then re-slices at each level of the tree. +// The iterative version allocates a new [][]byte once within the function and +// then rewrites sub-slices of that array at each level of the tree. +// +// Experimenting by modifying the code to simply calculate the +// hash and not store the result shows little to no difference in performance. +// +// These preliminary results suggest: +// +// 1. The performance of SimpleHashFromByteSlices is pretty good +// 2. Go has low overhead for recursive functions +// 3. The performance of the SimpleHashFromByteSlices routine is dominated +// by the actual hashing of data +// +// Although this work is in no way exhaustive, point #3 suggests that +// optimization of this routine would need to take an alternative +// approach to make significant improvements on the current performance.
+// +// Finally, considering that the recursive implementation is easier to +// read, it might not be worthwhile to switch to a less intuitive +// implementation for so little benefit. +func SimpleHashFromByteSlicesIterative(input [][]byte) []byte { + items := make([][]byte, len(input)) + + for i, leaf := range input { + items[i] = leafHash(leaf) + } + + size := len(items) + for { + switch size { + case 0: + return nil + case 1: + return items[0] + default: + rp := 0 // read position + wp := 0 // write position + for rp < size { + if rp+1 < size { + items[wp] = innerHash(items[rp], items[rp+1]) + rp += 2 + } else { + items[wp] = items[rp] + rp += 1 + } + wp += 1 + } + size = wp + } + } +} + // SimpleHashFromMap computes a Merkle tree from sorted map. // Like calling SimpleHashFromHashers with // `item = []byte(Hash(key) | Hash(value))`, diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index 9abe321c3..5bbe294af 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -70,6 +70,42 @@ func TestSimpleProof(t *testing.T) { } } +func TestSimpleHashAlternatives(t *testing.T) { + + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmn.RandBytes(tmhash.Size)) + } + + rootHash1 := SimpleHashFromByteSlicesIterative(items) + rootHash2 := SimpleHashFromByteSlices(items) + require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2) +} + +func BenchmarkSimpleHashAlternatives(b *testing.B) { + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmn.RandBytes(tmhash.Size)) + } + + b.ResetTimer() + b.Run("recursive", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = SimpleHashFromByteSlices(items) + } + }) + + b.Run("iterative", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = SimpleHashFromByteSlicesIterative(items) + } + }) +} + func Test_getSplitPoint(t *testing.T) { tests := []struct { length int diff --git a/crypto/multisig/wire.go b/crypto/multisig/codec.go similarity index 100% rename from crypto/multisig/wire.go rename to crypto/multisig/codec.go diff --git a/crypto/multisig/multisignature.go b/crypto/multisig/multisignature.go index 0d1796890..1e3bef4e1 100644 --- a/crypto/multisig/multisignature.go +++ b/crypto/multisig/multisignature.go @@ -1,7 +1,8 @@ package multisig import ( - "errors" + "fmt" + "strings" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/multisig/bitarray" @@ -53,13 +54,19 @@ func (mSig *Multisignature) AddSignature(sig []byte, index int) { mSig.Sigs[newSigIndex] = sig } -// AddSignatureFromPubKey adds a signature to the multisig, -// at the index in keys corresponding to the provided pubkey. +// AddSignatureFromPubKey adds a signature to the multisig, at the index in +// keys corresponding to the provided pubkey. 
func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error { index := getIndex(pubkey, keys) if index == -1 { - return errors.New("provided key didn't exist in pubkeys") + keysStr := make([]string, len(keys)) + for i, k := range keys { + keysStr[i] = fmt.Sprintf("%X", k.Bytes()) + } + + return fmt.Errorf("provided key %X doesn't exist in pubkeys: \n%s", pubkey.Bytes(), strings.Join(keysStr, "\n")) } + mSig.AddSignature(sig, index) return nil } diff --git a/crypto/multisig/threshold_pubkey_test.go b/crypto/multisig/threshold_pubkey_test.go index 2d2632abd..d1d7e803c 100644 --- a/crypto/multisig/threshold_pubkey_test.go +++ b/crypto/multisig/threshold_pubkey_test.go @@ -36,30 +36,68 @@ func TestThresholdMultisigValidCases(t *testing.T) { for tcIndex, tc := range cases { multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys) multisignature := NewMultisig(len(tc.pubkeys)) + for i := 0; i < tc.k-1; i++ { signingIndex := tc.signingIndices[i] - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig passed when i < k, tc %d, i %d", tcIndex, i) - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.Equal(t, i+1, len(multisignature.Sigs), - "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.False( + t, + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed when i < k, tc %d, i %d", tcIndex, i, + ) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.Equal( + t, + i+1, + len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex, + ) } - require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig passed with k - 1 sigs, tc %d", tcIndex) - multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys) - require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig failed after k good signatures, tc %d", tcIndex) + + require.False( + t, + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed with k - 1 sigs, tc %d", tcIndex, + ) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys), + ) + require.True( + t, + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig failed after k good signatures, tc %d", tcIndex, + ) + for i := tc.k + 1; i < len(tc.signingIndices); i++ { signingIndex := tc.signingIndices[i] - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.Equal(t, tc.passAfterKSignatures[i-tc.k-1], - multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i) - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.Equal(t, i+1, len(multisignature.Sigs), - "adding a signature for the same pubkey twice increased signature count by 2, 
tc %d", tcIndex) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.Equal( + t, + tc.passAfterKSignatures[i-tc.k-1], + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i, + ) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.Equal( + t, + i+1, + len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex, + ) } } } diff --git a/crypto/secp256k1/internal/secp256k1/curve.go b/crypto/secp256k1/internal/secp256k1/curve.go index 5409ee1d2..df87200f2 100644 --- a/crypto/secp256k1/internal/secp256k1/curve.go +++ b/crypto/secp256k1/internal/secp256k1/curve.go @@ -30,6 +30,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// nolint:gocritic package secp256k1 import ( diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go index 10a0f6f33..73dc9dec5 100644 --- a/crypto/xsalsa20symmetric/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -7,7 +7,6 @@ import ( "golang.org/x/crypto/nacl/secretbox" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" ) // TODO, make this into a struct that implements crypto.Symmetric. @@ -19,7 +18,7 @@ const secretLen = 32 // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { if len(secret) != secretLen { - cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) + panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } nonce := crypto.CRandBytes(nonceLen) nonceArr := [nonceLen]byte{} @@ -36,7 +35,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. 
func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { if len(secret) != secretLen { - cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) + panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } if len(ciphertext) <= secretbox.Overhead+nonceLen { return nil, errors.New("Ciphertext is too short") diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 0b54d2011..70b404c42 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -34,6 +34,14 @@ module.exports = { "/introduction/what-is-tendermint" ] }, + { + title: "Guides", + collapsable: false, + children: [ + "/guides/go-built-in", + "/guides/go" + ] + }, { title: "Apps", collapsable: false, @@ -44,7 +52,7 @@ module.exports = { "/app-dev/app-development", "/app-dev/subscribing-to-events-via-websocket", "/app-dev/indexing-transactions", - "/app-dev/abci-spec", + "/spec/abci/abci", "/app-dev/ecosystem" ] }, diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 49c2030a2..5b743cfa9 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -6,14 +6,12 @@ The documentation for Tendermint Core is hosted at: - https://tendermint-staging.interblock.io/docs/ built from the files in this (`/docs`) directory for -[master](https://github.com/tendermint/tendermint/tree/master/docs) -and [develop](https://github.com/tendermint/tendermint/tree/develop/docs), -respectively. +[master](https://github.com/tendermint/tendermint/tree/master/docs). ## How It Works -There is a CircleCI job listening for changes in the `/docs` directory, on both -the `master` and `develop` branches. Any updates to files in this directory -on those branches will automatically trigger a website deployment. Under the hood, +There is a CircleCI job listening for changes in the `/docs` directory on +the `master` branch. Any updates to files in this directory +will automatically trigger a website deployment. Under the hood, the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo. @@ -35,7 +33,7 @@ of the sidebar. **NOTE:** Strongly consider the existing links - both within this directory and to the website docs - when moving or deleting files. -Links to directories *MUST* end in a `/`. +Links to directories _MUST_ end in a `/`. Relative links should be used nearly everywhere, having discovered and weighed the following: @@ -101,4 +99,4 @@ We are using [Algolia](https://www.algolia.com) to power full-text search. This ## Consistency Because the build processes are identical (as is the information contained herein), this file should be kept in sync as -much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/DOCS_README.md). +much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md). diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 3e6cced87..4b21a4b2d 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -16,7 +16,6 @@ cd $GOPATH/src/github.com/tendermint git clone https://github.com/tendermint/tendermint.git cd tendermint make get_tools -make get_vendor_deps make install_abci ``` @@ -63,7 +62,7 @@ as `abci-cli` above. The kvstore just stores transactions in a merkle tree. Its code can be found -[here](https://github.com/tendermint/tendermint/blob/develop/abci/cmd/abci-cli/abci-cli.go) +[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go) and looks like: ``` @@ -138,7 +137,7 @@ response.
The server may be generic for a particular language, and we provide a [reference implementation in -Golang](https://github.com/tendermint/tendermint/tree/develop/abci/server). See the +Golang](https://github.com/tendermint/tendermint/tree/master/abci/server). See the [list of other ABCI implementations](./ecosystem.md) for servers in other languages. @@ -325,12 +324,20 @@ But the ultimate flexibility comes from being able to write the application easily in any language. We have implemented the counter in a number of languages [see the -example directory](https://github.com/tendermint/tendermint/tree/develop/abci/example). +example directory](https://github.com/tendermint/tendermint/tree/master/abci/example). -To run the Node JS version, `cd` to `example/js` and run +To run the Node.js version, first download & install [the JavaScript ABCI server](https://github.com/tendermint/js-abci): ``` -node app.js +git clone https://github.com/tendermint/js-abci.git +cd js-abci +npm install abci +``` + +Now you can start the app: + +```bash +node example/counter.js ``` (you'll have to kill the other counter application process). In another diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md index d157ce378..ba21d3a37 100644 --- a/docs/app-dev/app-development.md +++ b/docs/app-dev/app-development.md @@ -48,9 +48,9 @@ open ABCI connection with the application, which hosts an ABCI server. Shown are the request and response types sent on each connection. Most of the examples below are from [kvstore -application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/kvstore.go), +application](https://github.com/tendermint/tendermint/blob/master/abci/example/kvstore/kvstore.go), which is a part of the abci repo. [persistent_kvstore -application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/persistent_kvstore.go) +application](https://github.com/tendermint/tendermint/blob/master/abci/example/kvstore/persistent_kvstore.go) is used to show `BeginBlock`, `EndBlock` and `InitChain` example implementations. @@ -101,8 +101,8 @@ mempool state (this behaviour can be turned off with In go: ``` -func (app *KVStoreApplication) CheckTx(tx []byte) types.Result { - return types.OK +func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { + return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1} } ``` @@ -133,8 +133,8 @@ the mempool. If Tendermint is just started or the clients sent more than 100k transactions, old transactions may be sent to the application. So it is important CheckTx implements some logic to handle them. -There are cases where a transaction will (or may) become valid in some -future state, in which case you probably want to disable Tendermint's +If there are cases in your application where a transaction may become invalid in some +future state, you probably want to disable Tendermint's cache. You can do that by setting `[mempool] cache_size = 0` in the config.
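To make the replay-handling advice above concrete, here is a short sketch (ours, not part of the diff) of a `CheckTx` that deals with replayed transactions deterministically. The `toyApp` type and its `committed` map are illustrative assumptions; only `types.RequestCheckTx`/`types.ResponseCheckTx` and the `code` constants come from the ABCI packages touched by this change:

```go
package example

import (
	"bytes"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

// toyApp is a stand-in application with some already-committed state.
type toyApp struct {
	types.BaseApplication
	committed map[string][]byte // key -> value pairs already committed
}

// CheckTx accepts "key=value" transactions and explicitly handles the replay
// case described above: with `cache_size = 0` every tx is re-checked, so a
// tx that is already committed must be rejected cheaply and deterministically.
func (app *toyApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	parts := bytes.SplitN(req.Tx, []byte("="), 2)
	if len(parts) != 2 {
		return types.ResponseCheckTx{Code: code.CodeTypeEncodingError, Log: "expected key=value"}
	}
	if v, ok := app.committed[string(parts[0])]; ok && bytes.Equal(v, parts[1]) {
		return types.ResponseCheckTx{Code: code.CodeTypeBadNonce, Log: "tx already committed"}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
```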
@@ -168,14 +168,29 @@ In go: ``` // tx is either "key=value" or just arbitrary bytes -func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result { - parts := strings.Split(string(tx), "=") - if len(parts) == 2 { - app.state.Set([]byte(parts[0]), []byte(parts[1])) - } else { - app.state.Set(tx, tx) - } - return types.OK +func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { + var key, value []byte + parts := bytes.Split(req.Tx, []byte("=")) + if len(parts) == 2 { + key, value = parts[0], parts[1] + } else { + key, value = req.Tx, req.Tx + } + + app.state.db.Set(prefixKey(key), value) + app.state.Size += 1 + + events := []types.Event{ + { + Type: "app", + Attributes: []cmn.KVPair{ + {Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")}, + {Key: []byte("key"), Value: key}, + }, + }, + } + + return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} } ``` @@ -205,7 +220,7 @@ Once all processing of the block is complete, Tendermint sends the Commit request and blocks waiting for a response. While the mempool may run concurrently with block processing (the BeginBlock, DeliverTxs, and EndBlock), it is locked for the Commit request so that its state can be -safely reset during Commit. This means the app _MUST NOT_ do any +safely updated during Commit. This means the app _MUST NOT_ do any blocking communication with the mempool (ie. broadcast_tx) during Commit, or there will be deadlock. Note also that all remaining transactions in the mempool are replayed on the mempool connection @@ -223,9 +238,14 @@ job of the [Handshake](#handshake). In go: ``` -func (app *KVStoreApplication) Commit() types.Result { - hash := app.state.Hash() - return types.NewResultOK(hash, "") +func (app *KVStoreApplication) Commit() types.ResponseCommit { + // Using a memdb - just return the big endian size of the db + appHash := make([]byte, 8) + binary.PutVarint(appHash, app.state.Size) + app.state.AppHash = appHash + app.state.Height += 1 + saveState(app.state) + return types.ResponseCommit{Data: appHash} } ``` @@ -256,12 +276,10 @@ In go: ``` // Track the block hash and header information -func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) { - // update latest block info - app.blockHeader = params.Header - - // reset valset changes - app.changes = make([]*types.Validator, 0) +func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { + // reset valset changes + app.ValUpdates = make([]types.ValidatorUpdate, 0) + return types.ResponseBeginBlock{} } ``` @@ -303,7 +321,7 @@ In go: ``` // Update the validator set func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { - return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} + return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} } ``` @@ -347,43 +365,29 @@ Note: these query formats are subject to change! 
In go: ``` - func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { - if reqQuery.Prove { - value, proof, exists := app.state.GetWithProof(reqQuery.Data) - resQuery.Index = -1 // TODO make Proof return index - resQuery.Key = reqQuery.Data - resQuery.Value = value - resQuery.Proof = proof - if exists { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" - } - return - } else { - index, value, exists := app.state.Get(reqQuery.Data) - resQuery.Index = int64(index) - resQuery.Value = value - if exists { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" - } - return - } - } - return - } else { - index, value, exists := app.state.Get(reqQuery.Data) - resQuery.Index = int64(index) - resQuery.Value = value - if exists { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" - } - return - } +func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { + if reqQuery.Prove { + value := app.state.db.Get(prefixKey(reqQuery.Data)) + resQuery.Index = -1 // TODO make Proof return index + resQuery.Key = reqQuery.Data + resQuery.Value = value + if value != nil { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } else { + resQuery.Key = reqQuery.Data + value := app.state.db.Get(prefixKey(reqQuery.Data)) + resQuery.Value = value + if value != nil { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } } ``` @@ -439,7 +443,11 @@ In go: ``` func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size())} + return types.ResponseInfo{ + Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), + Version: version.ABCIVersion, + AppVersion: ProtocolVersion.Uint64(), + } } ``` @@ -463,13 +471,14 @@ In go: ``` // Save the validators in the merkle tree -func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) { - for _, v := range params.Validators { - r := app.updateValidator(v) - if r.IsErr() { - app.logger.Error("Error updating validators", "r", r) - } - } +func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain { + for _, v := range req.Validators { + r := app.updateValidator(v) + if r.IsErr() { + app.logger.Error("Error updating validators", "r", r) + } + } + return types.ResponseInitChain{} } ``` diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 5509a7012..eff70db68 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -28,7 +28,6 @@ Then run go get github.com/tendermint/tendermint cd $GOPATH/src/github.com/tendermint/tendermint make get_tools -make get_vendor_deps make install_abci ``` @@ -138,7 +137,7 @@ The result should look like: Note the `value` in the result (`YWJjZA==`); this is the base64-encoding of the ASCII of `abcd`. You can verify this in a python 2 shell by running `"YWJjZA==".decode('base64')` or in python 3 shell by running -`import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`. +`import codecs; codecs.decode(b"YWJjZA==", 'base64').decode('ascii')`. Stay tuned for a future release that [makes this output more human-readable](https://github.com/tendermint/tendermint/issues/1794). 
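As an aside, the same base64 decoding shown above for Python can be done in Go; this snippet is ours (not part of the docs) and assumes only the standard library:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// "YWJjZA==" is the base64 encoding of the ASCII bytes "abcd",
	// exactly the `value` returned by the query above.
	v, err := base64.StdEncoding.DecodeString("YWJjZA==")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v)) // prints: abcd
}
```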
diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index de8336a43..ffe8b989a 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -47,7 +47,7 @@ pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance": Example: ``` -func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result { +func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.Result { ... tags := []cmn.KVPair{ {[]byte("account.name"), []byte("igor")}, diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md index d745769c3..890b061bf 100644 --- a/docs/app-dev/subscribing-to-events-via-websocket.md +++ b/docs/app-dev/subscribing-to-events-via-websocket.md @@ -2,7 +2,7 @@ Tendermint emits different events, to which you can subscribe via [Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful -for third-party applications (for analysys) or inspecting state. +for third-party applications (for analysis) or inspecting state. [List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants) diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 1cfc7ddce..0ff6682ac 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -20,3 +20,41 @@ it stands today. If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. Note the context/background should be written in the present tense. + +### Table of Contents: + +- [ADR-001-Logging](./adr-001-logging.md) +- [ADR-002-Event-Subscription](./adr-002-event-subscription.md) +- [ADR-003-ABCI-APP-RPC](./adr-003-abci-app-rpc.md) +- [ADR-004-Historical-Validators](./adr-004-historical-validators.md) +- [ADR-005-Consensus-Params](./adr-005-consensus-params.md) +- [ADR-006-Trust-Metric](./adr-006-trust-metric.md) +- [ADR-007-Trust-Metric-Usage](./adr-007-trust-metric-usage.md) +- [ADR-008-Priv-Validator](./adr-008-priv-validator.md) +- [ADR-009-ABCI-Design](./adr-009-abci-design.md) +- [ADR-010-Crypto-Changes](./adr-010-crypto-changes.md) +- [ADR-011-Monitoring](./adr-011-monitoring.md) +- [ADR-012-Peer-Transport](./adr-012-peer-transport.md) +- [ADR-013-Symmetric-Crypto](./adr-013-symmetric-crypto.md) +- [ADR-014-Secp-Malleability](./adr-014-secp-malleability.md) +- [ADR-015-Crypto-Encoding](./adr-015-crypto-encoding.md) +- [ADR-016-Protocol-Versions](./adr-016-protocol-versions.md) +- [ADR-017-Chain-Versions](./adr-017-chain-versions.md) +- [ADR-018-ABCI-Validators](./adr-018-abci-validators.md) +- [ADR-019-Multisigs](./adr-019-multisigs.md) +- [ADR-020-Block-Size](./adr-020-block-size.md) +- [ADR-021-ABCI-Events](./adr-021-abci-events.md) +- [ADR-022-ABCI-Errors](./adr-022-abci-errors.md) +- [ADR-023-ABCI-Propose-tx](./adr-023-ABCI-propose-tx.md) +- [ADR-024-Sign-Bytes](./adr-024-sign-bytes.md) +- [ADR-025-Commit](./adr-025-commit.md) +- [ADR-026-General-Merkle-Proof](./adr-026-general-merkle-proof.md) +- [ADR-029-Check-Tx-Consensus](./adr-029-check-tx-consensus.md) +- [ADR-030-Consensus-Refactor](./adr-030-consensus-refactor.md) +- [ADR-033-Pubsub](./adr-033-pubsub.md) +- [ADR-034-Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md) +- [ADR-035-Documentation](./adr-035-documentation.md) +- [ADR-037-Deliver-Block](./adr-037-deliver-block.md) +- [ADR-039-Peer-Behaviour](./adr-039-peer-behaviour.md) +- 
[ADR-041-Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) +- [ADR-043-Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md) diff --git a/docs/architecture/adr-018-ABCI-Validators.md b/docs/architecture/adr-018-ABCI-Validators.md index b632da855..f40efca15 100644 --- a/docs/architecture/adr-018-ABCI-Validators.md +++ b/docs/architecture/adr-018-ABCI-Validators.md @@ -2,10 +2,7 @@ ## Changelog -016-08-2018: Follow up from review: - - Revert changes to commit round - - Remind about justification for removing pubkey - - Update pros/cons +016-08-2018: Follow up from review: - Revert changes to commit round - Remind about justification for removing pubkey - Update pros/cons 05-08-2018: Initial draft ## Context @@ -35,11 +32,11 @@ message ValidatorUpdate { } ``` -As noted in ADR-009[https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-009-ABCI-design.md], +As noted in ADR-009[https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-009-ABCI-design.md], the `Validator` does not contain a pubkey because quantum public keys are quite large and it would be wasteful to send them all over ABCI with every block. Thus, applications that want to take advantage of the information in BeginBlock -are *required* to store pubkeys in state (or use much less efficient lazy means +are _required_ to store pubkeys in state (or use much less efficient lazy means of verifying BeginBlock data). ### RequestBeginBlock diff --git a/docs/architecture/adr-025-commit.md b/docs/architecture/adr-025-commit.md index 3f2527951..6db039d43 100644 --- a/docs/architecture/adr-025-commit.md +++ b/docs/architecture/adr-025-commit.md @@ -1,14 +1,18 @@ # ADR 025 Commit ## Context + Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data. -In particular it contains an array of every precommit from the validators, which includes many copies of the same data. Such as `Height`, `Round`, `Type`, and `BlockID`. Also the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truely necessary data is the `Signature` and `Timestamp` associated with each `Vote`. +It contains a list of precommits from every validator, where the precommit +includes the whole `Vote` structure. Thus each of the commit height, round, +type, and blockID are repeated for every validator, and could be deduplicated. ``` type Commit struct { BlockID BlockID `json:"block_id"` Precommits []*Vote `json:"precommits"` } + type Vote struct { ValidatorAddress Address `json:"validator_address"` ValidatorIndex int `json:"validator_index"` @@ -26,7 +30,9 @@ References: [#2226](https://github.com/tendermint/tendermint/issues/2226) ## Proposed Solution + We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself. + ``` type Commit struct { Height int64 @@ -34,42 +40,56 @@ type Commit struct { BlockID BlockID `json:"block_id"` Precommits []*CommitSig `json:"precommits"` } + type CommitSig struct { + BlockID BlockIDFlag ValidatorAddress Address - Signature []byte Timestamp time.Time + Signature []byte } -``` -Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging. 
-## Status -Proposed -## Consequences +// indicate which BlockID the signature is for +type BlockIDFlag int -### Positive -The size of a `Commit` transmitted over the network goes from: +const ( + BlockIDFlagAbsent BlockIDFlag = iota // vote is not included in the Commit.Precommits + BlockIDFlagCommit // voted for the Commit.BlockID + BlockIDFlagNil // voted for nil +) -|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|) +``` -to: +Note the need for an extra byte to indicate whether the signature is for the BlockID or for nil. +This byte can also be used to indicate an absent vote, rather than using a nil object like we currently do, +which has been [problematic for compatibility between Amino and proto3](https://github.com/tendermint/go-amino/issues/260). +Note we also continue to store the `ValidatorAddress` in the `CommitSig`. +While this still takes 20-bytes per signature, it ensures that the Commit has all +information necessary to reconstruct Vote, which simplifies mapping between Commit and Vote objects +and helps with debugging. It also may be necessary for the light-client to know which address a signature corresponds to if +it is trying to verify a current commit with an older validator set. + +## Status -|BlockID|+|Height|+|Round| + n*(|Address| + |Signature| + |Timestamp|) +Proposed -This saves: +## Consequences -n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (Height + Round) +### Positive -In the current context, this would concretely be: -(assuming all ints are int64, and hashes are 32 bytes) +Removing the Type/Height/Round/Index and the BlockID saves roughly 80 bytes per precommit. +It varies because some integers are varint. The BlockID contains two 32-byte hashes and an integer, +and the Height is 8 bytes. -n *(72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16 +For a chain with 100 validators, that's up to 8kB in savings per block! -With 100 validators this is a savings of almost 10KB on every block. ### Negative -This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`. + +- Large breaking change to the block and commit structure +- Requires differentiating in code between the Vote and CommitSig objects, which may add some complexity (votes need to be reconstructed to be verified and gossiped) ### Neutral -This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX. \ No newline at end of file + +- Commit.Precommits no longer contains nil values diff --git a/docs/architecture/adr-037-deliver-block.md b/docs/architecture/adr-037-deliver-block.md new file mode 100644 index 000000000..31907c9ae --- /dev/null +++ b/docs/architecture/adr-037-deliver-block.md @@ -0,0 +1,100 @@ +# ADR 037: Deliver Block + +Author: Daniil Lashin (@danil-lashin) + +## Changelog + +13-03-2019: Initial draft + +## Context + +Initial conversation: https://github.com/tendermint/tendermint/issues/2901 + +Some applications can handle transactions in parallel, or at least some +part of tx processing can be parallelized.
Now it is not possible for developers +to execute txs in parallel because Tendermint delivers them sequentially. + +## Decision + +Tendermint now has `BeginBlock`, `EndBlock`, `Commit` and `DeliverTx` steps +while executing a block. This doc proposes merging these steps into one `DeliverBlock` +step. It will allow developers of applications to decide how they want to +execute transactions (in parallel or sequentially). Also it will simplify and +speed up communications between the application and Tendermint. + +As @jaekwon [mentioned](https://github.com/tendermint/tendermint/issues/2901#issuecomment-477746128) +in the discussion, not all applications will benefit from this solution. In some cases, +when an application handles transactions sequentially, it may slow down the blockchain, +because it needs to wait until the full block is transmitted to the application before it can start +processing it. Also, in the case of a complete change of ABCI, we would need to force all the apps +to change their implementations completely. That's why I propose to introduce one more ABCI +type. + +# Implementation Changes + +In addition to the default application interface, which now has this structure + +```go +type Application interface { + // Info and Mempool methods... + + // Consensus Connection + InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore + BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block + DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing + EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set + Commit() ResponseCommit // Commit the state and return the application Merkle root hash +} +``` + +this doc proposes to add one more: + +```go +type Application interface { + // Info and Mempool methods... + + // Consensus Connection + InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore + DeliverBlock(RequestDeliverBlock) ResponseDeliverBlock // Deliver full block + Commit() ResponseCommit // Commit the state and return the application Merkle root hash +} + +type RequestDeliverBlock struct { + Hash []byte + Header Header + Txs Txs + LastCommitInfo LastCommitInfo + ByzantineValidators []Evidence +} + +type ResponseDeliverBlock struct { + ValidatorUpdates []ValidatorUpdate + ConsensusParamUpdates *ConsensusParams + Tags []common.KVPair + TxResults []ResponseDeliverTx +} + +``` + +Also, we will need to add a new config param, which will specify what kind of ABCI the application uses. +For example, it can be `abci_type`.
Then we will have 2 types: +- `advanced` - current ABCI +- `simple` - proposed implementation + +## Status + +In review + +## Consequences + +### Positive + +- much simpler introduction and tutorials for new developers (instead of implementing 5 methods they +will need to implement only 3) +- txs can be handled in parallel +- simpler interface +- faster communications between Tendermint and the application + +### Negative + +- Tendermint should now support 2 kinds of ABCI diff --git a/docs/architecture/adr-040-blockchain-reactor-refactor.md b/docs/architecture/adr-040-blockchain-reactor-refactor.md new file mode 100644 index 000000000..520d55b5d --- /dev/null +++ b/docs/architecture/adr-040-blockchain-reactor-refactor.md @@ -0,0 +1,534 @@ +# ADR 040: Blockchain Reactor Refactor + +## Changelog + +19-03-2019: Initial draft + +## Context + +The Blockchain Reactor's high level responsibility is to enable peers who are far behind the current state of the +blockchain to quickly catch up by downloading many blocks in parallel from its peers, verifying block correctness, and +executing them against the ABCI application. We call the protocol executed by the Blockchain Reactor `fast-sync`. +The current architecture diagram of the blockchain reactor can be found here: + +![Blockchain Reactor Architecture Diagram](img/bc-reactor.png) + +The current architecture consists of dozens of routines and is tightly dependent on the `Switch`, making writing +unit tests almost impossible. Current tests require setting up complex dependency graphs and dealing with concurrency. +Note that having dozens of routines is in this case overkill as most of the time routines sit idle waiting for +something to happen (a message to arrive or a timeout to expire). Due to the dependency on the `Switch`, testing relatively +complex network scenarios and failures (for example adding and removing peers) is a very complex task and frequently leads +to complex tests with non-deterministic behavior ([#3400]). The inability to write proper tests keeps confidence in +the code low, and this has resulted in several issues (some are fixed in the meantime and some are still open): +[#3400], [#2897], [#2896], [#2699], [#2888], [#2457], [#2622], [#2026]. + +## Decision + +To remedy these issues we plan a major refactor of the blockchain reactor. The proposed architecture is largely inspired +by ADR-30 and is presented in the following diagram: +![Blockchain Reactor Refactor Diagram](img/bc-reactor-refactor.png) + +We suggest a concurrency architecture where the core algorithm (we call it `Controller`) is extracted into a finite +state machine. The active routine of the reactor is called `Executor` and is responsible for receiving and sending +messages from/to peers and triggering timeouts. What messages should be sent and timeouts triggered is determined mostly +by the `Controller`. The exception is the `Peer Heartbeat` mechanism, which is the `Executor`'s responsibility. The heartbeat +mechanism is used to remove slow and unresponsive peers from the peer list. Writing unit tests is simpler with +this architecture as most of the critical logic is part of the `Controller` function. We expect that the simpler concurrency +architecture will not have a significant negative effect on the performance of this reactor (to be confirmed by +experimental evaluation). + + +### Implementation changes + +We assume the following system model for the "fast sync" protocol: + +* a node is connected to a random subset of all nodes that represents its peer set. Some nodes are correct and some
+
+### Implementation changes
+
+We assume the following system model for the "fast sync" protocol:
+
+* a node is connected to a random subset of all nodes that represents its peer set. Some nodes are correct and some
+  might be faulty. We don't make assumptions about the ratio of faulty nodes, i.e., it is possible that all nodes in some
+  peer set are faulty.
+* we assume that communication between correct nodes is synchronous, i.e., if a correct node `p` sends a message `m` to
+  a correct node `q` at time `t`, then `q` will receive the message at the latest at time `t+Delta`, where `Delta` is a system
+  parameter that is known by network participants. `Delta` is normally chosen to be an order of magnitude higher than
+  the real (maximum) communication delay between correct nodes. Therefore, if a correct node `p` sends a request message
+  to a correct node `q` at time `t` and there is no corresponding reply at time `t + 2*Delta`, then `p` can assume
+  that `q` is faulty. Note that the network assumptions for the consensus reactor are different (we assume a partially
+  synchronous model there).
+
+The requirements for the "fast sync" protocol are formally specified as follows:
+
+- `Correctness`: If a correct node `p` is connected to a correct node `q` for a long enough period of time, then `p`
+  will eventually download all requested blocks from `q`.
+- `Termination`: If the set of peers of a correct node `p` is stable (no new nodes are added to the peer set of `p`) for
+  a long enough period of time, then the protocol eventually terminates.
+- `Fairness`: A correct node `p` sends requests for blocks to all peers from its peer set.
+
+As explained above, the `Executor` is responsible for sending and receiving messages that are part of the `fast-sync`
+protocol. The following messages are exchanged as part of the `fast-sync` protocol:
+
+``` go
+type Message int
+const (
+  MessageUnknown Message = iota
+  MessageStatusRequest
+  MessageStatusResponse
+  MessageBlockRequest
+  MessageBlockResponse
+)
+```
+`MessageStatusRequest` is sent periodically to all peers as a request for a peer to provide its current height. It is
+part of the `Peer Heartbeat` mechanism, and a failure to respond timely to this message results in a peer being removed
+from the peer set. Note that the `Peer Heartbeat` mechanism is used only while a peer is in `fast-sync` mode. We assume
+here the existence of a mechanism that gives a node the ability to inform its peers that it is in `fast-sync` mode.
+
+``` go
+type MessageStatusRequest struct {
+  SeqNum int64   // sequence number of the request
+}
+```
+`MessageStatusResponse` is sent as a response to `MessageStatusRequest` to inform the requester about the peer's current
+height.
+
+``` go
+type MessageStatusResponse struct {
+  SeqNum int64   // sequence number of the corresponding request
+  Height int64   // current peer height
+}
+```
+
+`MessageBlockRequest` is used to make a request for a block and the corresponding commit certificate at a given height.
+
+``` go
+type MessageBlockRequest struct {
+  Height int64
+}
+```
+
+`MessageBlockResponse` is a response for the corresponding block request. In addition to providing the block and the
+corresponding commit certificate, it also contains the current peer height.
+
+``` go
+type MessageBlockResponse struct {
+  Height int64
+  Block Block
+  Commit Commit
+  PeerHeight int64
+}
+```
+
+In addition to sending and receiving messages and the `HeartBeat` mechanism, the `Executor` also manages timeouts
+that are triggered upon `Controller` request. The `Controller` is then informed once a timeout expires.
+
+``` go
+type TimeoutTrigger int
+const (
+  TimeoutUnknown TimeoutTrigger = iota
+  TimeoutResponseTrigger
+  TimeoutTerminationTrigger
+)
+```
+
+The `Controller` can be modelled as a function with clearly defined inputs:
+
+* `State` - the current state of the node. Contains data about connected peers and their behavior, pending requests,
+  received blocks, etc.
+* `Event` - significant events in the network.
+
+producing clear outputs:
+
+* `State` - the updated state of the node,
+* `MessageToSend` - a signal of what message to send and to which peer,
+* `TimeoutTrigger` - a signal that a timeout should be triggered.
+
+
+We consider the following `Event` types:
+
+``` go
+type Event int
+const (
+  EventUnknown Event = iota
+  EventStatusResponse
+  EventBlockRequest
+  EventBlockResponse
+  EventRemovePeer
+  EventTimeoutResponse
+  EventTimeoutTermination
+)
+```
+
+The `EventStatusResponse` event is generated once a `MessageStatusResponse` is received by the `Executor`.
+
+``` go
+type EventStatusResponse struct {
+  PeerID ID
+  Height int64
+}
+```
+
+The `EventBlockRequest` event is generated once a `MessageBlockRequest` is received by the `Executor`.
+
+``` go
+type EventBlockRequest struct {
+  Height int64
+  PeerID p2p.ID
+}
+```
+The `EventBlockResponse` event is generated upon reception of a `MessageBlockResponse` message by the `Executor`.
+
+``` go
+type EventBlockResponse struct {
+  Height int64
+  Block Block
+  Commit Commit
+  PeerID ID
+  PeerHeight int64
+}
+```
+`EventRemovePeer` is generated by the `Executor` to signal that the connection to a peer is closed due to peer misbehavior.
+
+``` go
+type EventRemovePeer struct {
+  PeerID ID
+}
+```
+`EventTimeoutResponse` is generated by the `Executor` to signal that a timeout triggered by `TimeoutResponseTrigger` has
+expired.
+
+``` go
+type EventTimeoutResponse struct {
+  PeerID ID
+  Height int64
+}
+```
+`EventTimeoutTermination` is generated by the `Executor` to signal that a timeout triggered by `TimeoutTerminationTrigger`
+has expired.
+
+``` go
+type EventTimeoutTermination struct {
+  Height int64
+}
+```
+
+`MessageToSend` is just a wrapper around the `Message` type that contains the id of the peer to which the message should
+be sent.
+
+``` go
+type MessageToSend struct {
+  PeerID ID
+  Message Message
+}
+```
+
+The Controller state machine can be in two modes: `ModeFastSync`, when
+a node is trying to catch up with the network by downloading committed blocks,
+and `ModeConsensus`, in which it executes the Tendermint consensus protocol. We
+consider that `fast sync` mode terminates once the Controller switches to
+`ModeConsensus`.
+
+``` go
+type Mode int
+const (
+  ModeUnknown Mode = iota
+  ModeFastSync
+  ModeConsensus
+)
+```
+The `Controller` manages the following state:
+
+``` go
+type ControllerState struct {
+  Height int64              // the first block that is not committed
+  Mode Mode                 // mode of operation
+  PeerMap map[ID]PeerStats  // map of peer IDs to peer statistics
+  MaxRequestPending int64   // maximum height of the pending requests
+  FailedRequests []int64    // list of failed block requests
+  PendingRequestsNum int    // total number of pending requests
+  Store []BlockInfo         // contains a list of downloaded blocks
+  Executor BlockExecutor    // stores, verifies and executes blocks
+}
+```
+
+The `PeerStats` data structure keeps, for every peer, its current height and the pending block request, if any.
+
+``` go
+type PeerStats struct {
+  Height int64
+  PendingRequest int64   // the height of the block requested from this peer (-1 if none)
+}
+```
+
+The `BlockInfo` data structure is used to store information (as part of the block store) about downloaded blocks: from
+what peer a block and the corresponding commit certificate were received.
+``` go
+type BlockInfo struct {
+  Block Block
+  Commit Commit
+  PeerID ID   // the peer from which we received the corresponding Block and Commit
+}
+```
+
+The `Controller` is initialized by providing an initial height (`startHeight`) from which it will start downloading
+blocks from peers, and the current state of the `BlockExecutor`.
+
+``` go
+func NewControllerState(startHeight int64, executor BlockExecutor) ControllerState {
+  state = ControllerState {}
+  state.Height = startHeight
+  state.Mode = ModeFastSync
+  state.MaxRequestPending = startHeight - 1
+  state.PendingRequestsNum = 0
+  state.Executor = executor
+  initialize state.PeerMap, state.FailedRequests and state.Store to empty data structures
+  return state
+}
+```
+
+The core protocol logic is given by the following function:
+
+``` go
+func handleEvent(state ControllerState, event Event) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+  msg = nil
+  timeout = nil
+  error = nil
+
+  switch state.Mode {
+  case ModeConsensus:
+    switch event := event.(type) {
+    case EventBlockRequest:
+      msg = createBlockResponseMessage(state, event)
+      return state, msg, timeout, error
+    default:
+      error = "Only respond to BlockRequests while in ModeConsensus!"
+      return state, msg, timeout, error
+    }
+
+  case ModeFastSync:
+    switch event := event.(type) {
+    case EventBlockRequest:
+      msg = createBlockResponseMessage(state, event)
+      return state, msg, timeout, error
+
+    case EventStatusResponse:
+      return handleEventStatusResponse(event, state)
+
+    case EventRemovePeer:
+      return handleEventRemovePeer(event, state)
+
+    case EventBlockResponse:
+      return handleEventBlockResponse(event, state)
+
+    case EventTimeoutResponse:
+      return handleEventResponseTimeout(event, state)
+
+    case EventTimeoutTermination:
+      // The termination timeout is triggered in case of an empty peer set or in case there are no pending requests.
+      // If this timeout expires and in the meantime no new peers are added and no new pending requests are made,
+      // then `fast-sync` mode terminates by switching to `ModeConsensus`.
+      // Note that the termination timeout should be higher than the response timeout.
+      if state.Height == event.Height && state.PendingRequestsNum == 0 { state.Mode = ModeConsensus }
+      return state, msg, timeout, error
+
+    default:
+      error = "Received unknown event type!"
+      return state, msg, timeout, error
+    }
+  }
+}
+```
+
+``` go
+func createBlockResponseMessage(state ControllerState, event EventBlockRequest) MessageToSend {
+  msgToSend = nil
+  if _, ok := state.PeerMap[event.PeerID]; !ok {
+    peerStats = PeerStats{-1, -1}
+  } else {
+    peerStats = state.PeerMap[event.PeerID]
+  }
+  if state.Executor.ContainsBlockWithHeight(event.Height) && event.Height > peerStats.Height {
+    peerStats.Height = event.Height
+    msg = MessageBlockResponse{
+      Height: event.Height,
+      Block: state.Executor.getBlock(event.Height),
+      Commit: state.Executor.getCommit(event.Height),
+      PeerHeight: state.Height - 1,
+    }
+    msgToSend = MessageToSend { event.PeerID, msg }
+  }
+  state.PeerMap[event.PeerID] = peerStats
+  return msgToSend
+}
+```

+``` go
+func handleEventStatusResponse(event EventStatusResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+  if _, ok := state.PeerMap[event.PeerID]; !ok {
+    peerStats = PeerStats{ -1, -1 }
+  } else {
+    peerStats = state.PeerMap[event.PeerID]
+  }
+
+  if event.Height > peerStats.Height { peerStats.Height = event.Height }
+  // if there is no pending request for this peer, try to send it a request for a block
+  if peerStats.PendingRequest == -1 {
+    msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
+    // msg is nil if no request for a block can be made to this peer at this point in time
+    if msg != nil {
+      peerStats.PendingRequest = msg.Height
+      state.PendingRequestsNum++
+      // when a request for a block is sent to a peer, a response timeout is triggered. If no corresponding block is sent by the peer
+      // during the response timeout period, then the peer is considered faulty and is removed from the peer set.
+      timeout = TimeoutResponseTrigger{ msg.PeerID, msg.Height, PeerTimeout }
+    } else if state.PendingRequestsNum == 0 {
+      // if there are no pending requests and no new request can be placed to the peer, the termination timeout is triggered.
+      // If the termination timeout expires and we are still at the same height and there are no pending requests, the "fast-sync"
+      // mode is finished and we switch to `ModeConsensus`.
+      timeout = TimeoutTerminationTrigger{ state.Height, TerminationTimeout }
+    }
+  }
+  state.PeerMap[event.PeerID] = peerStats
+  return state, msg, timeout, error
+}
+```
+
+``` go
+func handleEventRemovePeer(event EventRemovePeer, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+  if _, ok := state.PeerMap[event.PeerID]; ok {
+    pendingRequest = state.PeerMap[event.PeerID].PendingRequest
+    // if a peer is removed from the peer set, its pending request is declared failed and added to the `FailedRequests` list
+    // so it can be retried.
+    if pendingRequest != -1 {
+      add(state.FailedRequests, pendingRequest)
+      state.PendingRequestsNum--
+    }
+    delete(state.PeerMap, event.PeerID)
+    // if the peer set is empty after removal of this peer, then the termination timeout is triggered.
+    if state.PeerMap.isEmpty() {
+      timeout = TimeoutTerminationTrigger{ state.Height, TerminationTimeout }
+    }
+  } else { error = "Removing unknown peer!" }
+  return state, msg, timeout, error
+}
+```
+
+``` go
+func handleEventBlockResponse(event EventBlockResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+  if _, ok := state.PeerMap[event.PeerID]; ok {
+    peerStats = state.PeerMap[event.PeerID]
+    // when the expected block arrives from a peer, it is added to the store so it can be verified and, if correct, executed afterwards.
+    if peerStats.PendingRequest == event.Height {
+      peerStats.PendingRequest = -1
+      state.PendingRequestsNum--
+      if event.PeerHeight > peerStats.Height { peerStats.Height = event.PeerHeight }
+      state.Store[event.Height] = BlockInfo{ event.Block, event.Commit, event.PeerID }
+      // blocks are verified sequentially, so adding a block to the store does not mean that it will be immediately verified,
+      // as some of the previous blocks might be missing.
+      state = verifyBlocks(state) // it can lead to event.PeerID being removed from the peer list
+      if _, ok := state.PeerMap[event.PeerID]; ok {
+        // we try to identify a new block request that can be sent to this peer
+        msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
+        if msg != nil {
+          peerStats.PendingRequest = msg.Height
+          state.PendingRequestsNum++
+          // if a request for a block is made, the response timeout is triggered
+          timeout = TimeoutResponseTrigger{ msg.PeerID, msg.Height, PeerTimeout }
+        } else if state.PeerMap.isEmpty() || state.PendingRequestsNum == 0 {
+          // if the peer map is empty (the peer can be removed if block verification failed) or there are no pending requests,
+          // the termination timeout is triggered.
+          timeout = TimeoutTerminationTrigger{ state.Height, TerminationTimeout }
+        }
+        state.PeerMap[event.PeerID] = peerStats
+      }
+    } else { error = "Received Block from wrong peer!" }
+  } else { error = "Received Block from unknown peer!" }
+
+  return state, msg, timeout, error
+}
+```
+
+``` go
+func handleEventResponseTimeout(event EventTimeoutResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+  if _, ok := state.PeerMap[event.PeerID]; ok {
+    peerStats = state.PeerMap[event.PeerID]
+    // if a response timeout expires and the peer hasn't delivered the block, the peer is removed from the peer list and
+    // the request is added to `FailedRequests` so the block can be downloaded from another peer
+    if peerStats.PendingRequest == event.Height {
+      add(state.FailedRequests, event.Height)
+      delete(state.PeerMap, event.PeerID)
+      state.PendingRequestsNum--
+      // if the peer set is empty, then the termination timeout is triggered
+      if state.PeerMap.isEmpty() {
+        timeout = TimeoutTerminationTrigger{ state.Height, TerminationTimeout }
+      }
+    }
+  }
+  return state, msg, timeout, error
+}
+```
+
+``` go
+func createBlockRequestMessage(state ControllerState, peerID ID, peerHeight int64) MessageToSend {
+  msg = nil
+  blockHeight = -1
+  r = find a request r in state.FailedRequests such that r <= peerHeight // returns `nil` if there is no such request
+  // if there is a failed request for a height that can be downloaded from this peer, retry it
+  if r != nil {
+    blockHeight = r
+    delete(state.FailedRequests, r)
+  } else if state.MaxRequestPending < peerHeight {
+    // if the height of the maximum pending request is smaller than the peer height, then ask the peer for the next block
+    state.MaxRequestPending++
+    blockHeight = state.MaxRequestPending // increment state.MaxRequestPending and then return the new value
+  }
+
+  if blockHeight > -1 { msg = MessageToSend { peerID, MessageBlockRequest { blockHeight } } }
+  return msg
+}
+```
+
+``` go
+func verifyBlocks(state ControllerState) ControllerState {
+  done = false
+  for !done {
+    block = state.Store[state.Height]
+    if block != nil {
+      verified = verify block.Block using block.Commit // returns `true` if verification succeeds, `false` otherwise
+
+      if verified {
+        block.Execute()   // executing a block is a costly operation, so it might make sense to execute it asynchronously
+        state.Height++
+      } else {
+        // if block verification failed, then the height is added to `FailedRequests` and the peer is removed from the peer set
+        add(state.FailedRequests, state.Height)
+        state.Store[state.Height] = nil
+        if _, ok := state.PeerMap[block.PeerID]; ok {
+          pendingRequest = state.PeerMap[block.PeerID].PendingRequest
+          // if there is a pending request sent to the peer that is about to be removed from the peer set, add it to `FailedRequests`
+          if pendingRequest != -1 {
+            add(state.FailedRequests, pendingRequest)
+            state.PendingRequestsNum--
+          }
+          delete(state.PeerMap, block.PeerID)
+        }
+        done = true
+      }
+    } else { done = true }
+  }
+  return state
+}
+```
+
+In the proposed architecture the `Controller` is not an active task, i.e., it is called by the `Executor`. Depending on
+the values returned by the `Controller`, the `Executor` will send a message to some peer (`msg` != nil), trigger a
+timeout (`timeout` != nil) or deal with errors (`error` != nil).
+In case a timeout is triggered, the `Executor` will provide the corresponding timeout event as an input to the
+`Controller` once the timeout expires.
+
+
+## Status
+
+Draft.
+
+## Consequences
+
+### Positive
+
+- isolated implementation of the algorithm
+- improved testability - simpler to prove correctness
+- clearer separation of concerns - easier to reason about
+
+### Negative
+
+### Neutral
diff --git a/docs/architecture/adr-041-proposer-selection-via-abci.md b/docs/architecture/adr-041-proposer-selection-via-abci.md
new file mode 100644
index 000000000..58bf20de3
--- /dev/null
+++ b/docs/architecture/adr-041-proposer-selection-via-abci.md
@@ -0,0 +1,29 @@
+# ADR 041: Application should be in charge of validator set
+
+## Changelog
+
+
+## Context
+
+Currently Tendermint is in charge of the validator set and proposer selection. The application can only update the validator set at EndBlock time.
+To support light clients, the application should make sure that at least 2/3 of the validators remain the same at each round.
+
+The application should have full control over validator set changes and proposer selection. In each round the application can provide the ordered list of validators for the next rounds, together with their voting power. The proposer is the first in the list; in case the proposer is offline, the next one in the list can propose, and so on. A hypothetical shape for this information is sketched below.
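+
+For illustration only - this is not part of ABCI today, and the type and field names are hypothetical:
+
+```go
+// Hypothetical ABCI addition: an ordered proposer list supplied by the
+// application (e.g. alongside validator updates at the end of a block).
+type ProposerSelection struct {
+  // Validators for the next rounds in proposal order, with their power:
+  // the first entry proposes round 0; if it is offline, the second
+  // entry proposes round 1, and so on.
+  Proposers []ValidatorUpdate
+}
+```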
+
+## Decision
+
+## Status
+
+## Consequences
+
+Tendermint is no longer in charge of the validator set and its changes. The application should provide the correct information.
+However, Tendermint can provide a pseudo-randomness algorithm to help the application select the proposer in each round.
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## References
+
diff --git a/docs/architecture/adr-042-state-sync.md b/docs/architecture/adr-042-state-sync.md
new file mode 100644
index 000000000..d525a4974
--- /dev/null
+++ b/docs/architecture/adr-042-state-sync.md
@@ -0,0 +1,239 @@
+# ADR 042: State Sync Design
+
+## Changelog
+
+2019-06-27: Init by EB
+2019-07-04: Follow up by brapse
+
+## Context
+StateSync is a feature which would allow a new node to receive a
+snapshot of the application state without downloading blocks or going
+through consensus. Once downloaded, the node could switch to FastSync
+and eventually participate in consensus. The goal of StateSync is to
+facilitate setting up a new node as quickly as possible.
+
+## Considerations
+Because Tendermint doesn't know anything about the application state,
+StateSync will broker messages between nodes and through
+the ABCI to an opaque application. The implementation will have multiple
+touch points on both the Tendermint code base and the ABCI application:
+
+* A StateSync reactor to facilitate peer communication - Tendermint
+* A set of ABCI messages to transmit application state to the reactor - Tendermint
+* A set of MultiStore APIs for exposing snapshot data to the ABCI - ABCI application
+* A storage format with validation and performance considerations - ABCI application
+
+### Implementation Properties
+Beyond the approach, any implementation of StateSync can be evaluated
+across different criteria:
+
+* Speed: Expected throughput of producing and consuming snapshots
+* Safety: Cost of pushing invalid snapshots to a node
+* Liveness: Cost of preventing a node from receiving/constructing a snapshot
+* Effort: How much effort does an implementation require
+
+### Implementation Questions
+* What is the format of a snapshot
+  * Complete snapshot
+  * Ordered IAVL key ranges
+  * Individually compressed chunks which can be validated
+* How is data validated
+  * Trust a peer with its data blindly
+  * Trust a majority of peers
+  * Use light client validation to validate each chunk against the consensus-
+    produced merkle tree root
+* What are the performance characteristics
+  * Random vs sequential reads
+  * How parallelizable is the scheduling algorithm
+
+### Proposals
+Broadly speaking there are two approaches to this problem which have had
+varying degrees of discussion and progress. These approaches can be
+summarized as:
+
+**Lazy:** Where snapshots are produced dynamically at request time. This
+solution would use the existing data structure.
+**Eager:** Where snapshots are produced periodically and served from disk at
+request time. This solution would create an auxiliary data structure
+optimized for batch read/writes.
+
+Additionally the proposals tend to vary in how they provide safety
+properties.
+
+**LightClient** Where a client can acquire the merkle root from block
+headers synchronized from a trusted validator set. Subsets of the application state,
+called chunks, can therefore be validated on receipt to ensure each chunk
+is part of the merkle root.
+
+**Majority of Peers** Where manifests of chunks along with checksums are
+downloaded and compared against versions provided by a majority of
+peers.
+
+#### Lazy StateSync
+An [initial specification](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) was published by Alexis Sellier.
+In this design, the state has a given `size` of primitive elements (like
+keys or nodes), each element is assigned a number from 0 to `size-1`,
+and chunks consist of a range of such elements. Ackratos raised
+[some concerns](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit)
+about this design, somewhat specific to the IAVL tree, and mainly concerning
+performance of random reads and of iterating through the tree to determine element numbers
+(ie. elements aren't indexed by the element number).
+
+An alternative design was suggested by Jae Kwon in
+[#3639](https://github.com/tendermint/tendermint/issues/3639) where chunking
+happens lazily and in a dynamic way: nodes request key ranges from their peers,
+and peers respond with some subset of the
+requested range and with notes on how to request the rest in parallel from other
+peers. Unlike chunk numbers, keys can be verified directly. And if some keys in the
+range are omitted, proofs for the range will fail to verify.
+This way a node can start by requesting the entire tree from one peer,
+and that peer can respond with, say, the first few keys and the ranges to request
+from other peers.
+
+Additionally, per-chunk validation tends to come more naturally to the
+Lazy approach since it tends to use the existing structure of the tree
+(ie. keys or nodes) rather than state-sync specific chunks. Such a
+design for tendermint was originally tracked in
+[#828](https://github.com/tendermint/tendermint/issues/828).
+
+#### Eager StateSync
+Parity uses ["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
+download both blocks and state snapshots from peers. Data is carved into ~4MB
+chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
+manifest file which coordinates the state-sync. Obtaining a correct manifest
+file seems to require an honest majority of peers. This means you may not find
+out the state is incorrect until you download the whole thing and compare it
+with a verified block header.
+
+A similar solution was implemented by Binance in
+[#3594](https://github.com/tendermint/tendermint/pull/3594)
+based on their initial implementation in
+[PR #3243](https://github.com/tendermint/tendermint/pull/3243)
+and [some learnings](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit).
+Note this still requires the honest-majority peer assumption.
+
+As an eager protocol, warp-sync can efficiently compress larger, more
+predictable chunks once per snapshot and service many new peers. By
+comparison, lazy chunkers would have to compress each chunk at request
+time.
+
+### Analysis of Lazy vs Eager
+Lazy and Eager have more in common than they differ. Both require
+reactors on the tendermint side, a set of ABCI messages and a method for
+serializing/deserializing snapshots facilitated by a SnapshotFormat.
+
+The biggest difference between the Lazy and Eager proposals is in the
+read/write patterns necessitated by serving a snapshot chunk.
+Specifically, Lazy State Sync performs random reads of the underlying data
+structure while Eager can optimize for sequential reads.
+
+This distinction between approaches was demonstrated by Binance's
+[ackratos](https://github.com/ackratos) in their implementation of [Lazy
+State sync](https://github.com/tendermint/tendermint/pull/3243), the
+[analysis](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/)
+of its performance, and the follow-up implementation of [Warp
+Sync](http://github.com/tendermint/tendermint/pull/3594).
+
+#### Comparing Security Models
+There are several different security models which have been
+discussed/proposed in the past, but they generally fall into two categories.
+
+Light client validation: In which the node receiving data is expected to
+first perform a light client sync and have all the necessary block
+headers. With a trusted block header (trusted in the sense of coming from a
+validator set subject to [weak
+subjectivity](https://github.com/tendermint/tendermint/pull/3795)), the node
+can compare any subset of keys, called a chunk, against the merkle root.
+The advantage of light client validation is that the block headers are
+signed by validators, which have something to lose for malicious
+behaviour. If a validator were to provide an invalid proof, they can be
+slashed.
+
+Majority of peer validation: A manifest file containing a list of chunks
+along with checksums of each chunk is downloaded from a
+trusted source.
+That source can be a community resource similar to
+[sum.golang.org](https://sum.golang.org), or the manifest can be downloaded from the majority
+of peers. One disadvantage of the majority-of-peers security model is its
+vulnerability to eclipse attacks, in which a malicious user looks to
+saturate a target node's peer list and produce a manufactured picture of a
+majority.
+
+A third option would be to include snapshot related data in the
+block header. This could include the manifest with related checksums and be
+secured through consensus. One challenge of this approach is to
+ensure that creating snapshots does not put undue burden on block
+proposers by synchronizing snapshot creation and block creation. One
+approach to minimizing the burden is for the snapshot for height
+`H` to be included in block `H+n`, where `n` is some number of blocks away,
+giving the block proposer enough time to complete the snapshot
+asynchronously.
+
+## Proposal: Eager StateSync With Per Chunk Light Client Validation
+The conclusion after some consideration of the advantages/disadvantages of
+eager/lazy and the different security models is to produce a state sync
+which eagerly produces snapshots and uses light client validation. This
+approach has the performance advantages of pre-computing efficient
+snapshots which can be streamed to new nodes on demand using sequential IO.
+Secondly, by using light client validation we can validate each chunk on
+receipt and avoid the potential eclipse attack of majority-of-peers-based
+security.
+
+### Implementation
+Tendermint is responsible for downloading and verifying chunks of
+AppState from peers. The ABCI application is responsible for taking
+AppStateChunk objects from TM and constructing a valid state tree whose
+root corresponds with the AppHash of the syncing block. In particular we
+will need to implement:
+
+* Build a new StateSync reactor which brokers message transmission between the peers
+  and the ABCI application
+* A set of ABCI messages
+* Design SnapshotFormat as an interface which can:
+  * validate chunks
+  * read/write chunks from file
+  * read/write chunks to/from application state store
+  * convert manifests into chunkRequest ABCI messages
+* Implement SnapshotFormat for cosmos-hub with concrete implementation for:
+  * read/write chunks in a way which can be:
+    * parallelized across peers
+    * validated on receipt
+  * read/write to/from IAVL+ tree
+
+![StateSync Architecture Diagram](img/state-sync.png)
+
+## Implementation Path
+* Create StateSync reactor based on [#3753](https://github.com/tendermint/tendermint/pull/3753)
+* Design SnapshotFormat with an eye towards cosmos-hub implementation
+* ABCI message to send/receive SnapshotFormat
+* IAVL+ changes to support SnapshotFormat
+* Deliver Warp sync (no chunk validation)
+* light client implementation for weak subjectivity
+* Deliver StateSync with chunk validation
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Neutral
+
+### Positive
+* Safe & performant state sync design substantiated with real world implementation experience
+* General interfaces allowing application specific innovation
+* Parallelizable implementation trajectory with reasonable engineering effort
+
+### Negative
+* Static scheduling lacks opportunity for real time chunk availability optimizations
+
+## References
+[sync: Sync current state without full replay for Applications](https://github.com/tendermint/tendermint/issues/828) - original issue
+[tendermint state sync proposal](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) - Cloudhead proposal
+[tendermint state sync proposal 2](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) - ackratos proposal
+[proposal 2 implementation](https://github.com/tendermint/tendermint/pull/3243) - ackratos implementation
+[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae proposal
+[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
+[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
+
+
diff --git a/docs/architecture/adr-043-blockchain-riri-org.md b/docs/architecture/adr-043-blockchain-riri-org.md
new file mode 100644
index 000000000..3cdf6e31e
--- /dev/null
+++ b/docs/architecture/adr-043-blockchain-riri-org.md
@@ -0,0 +1,391 @@
+# ADR 043: Blockchain Reactor Riri-Org
+
+## Changelog
+* 18-06-2019: Initial draft
+* 08-07-2019: Reviewed
+
+## Context
+
+The blockchain reactor is responsible for two high level processes: sending/receiving blocks from peers, and fast-syncing blocks to catch up a node that is far behind. The goal of [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) was to refactor these two processes by separating the business logic currently wrapped up in go-channels into pure `handle*` functions. While the ADR specified what the final form of the reactor might look like, it lacked guidance on the intermediary steps to get there.
+The following diagram illustrates the state of the [blockchain-reorg](https://github.com/tendermint/tendermint/pull/3561) reactor, which will be referred to as `v1`.
+
+![v1 Blockchain Reactor Architecture
+Diagram](https://github.com/tendermint/tendermint/blob/f9e556481654a24aeb689bdadaf5eab3ccd66829/docs/architecture/img/blockchain-reactor-v1.png)
+
+While `v1` of the blockchain reactor has shown significant improvements in terms of simplifying the concurrency model, the current PR has run into a few roadblocks.
+
+* The current PR is large and difficult to review.
+* Block gossiping and fast sync processes are highly coupled to the shared `Pool` data structure.
+* Peer communication is spread over multiple components, creating a complex dependency graph which must be mocked out during testing.
+* Timeouts modeled as stateful tickers introduce non-determinism in tests
+
+This ADR is meant to specify the missing components and control flow necessary to achieve [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md).
+
+## Decision
+
+Partition the responsibilities of the blockchain reactor into a set of components which communicate exclusively with events. Events will contain timestamps allowing each component to track time as internal state. The internal state will be mutated by a set of `handle*` functions which will produce event(s). The integration between components will happen in the reactor, and reactor tests will then become integration tests between components. This design will be known as `v2`.
+
+![v2 Blockchain Reactor Architecture
+Diagram](https://github.com/tendermint/tendermint/blob/f9e556481654a24aeb689bdadaf5eab3ccd66829/docs/architecture/img/blockchain-reactor-v2.png)
+
+### Reactor changes in detail
+
+The reactor will include a demultiplexing routine which will send each message to each sub routine for independent processing.
Each sub routine will then select the messages it's interested in and call the handle-specific function specified in [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md). The demuxRoutine acts as a "pacemaker", setting the time in which events are expected to be handled.
+
+
+```go
+func demuxRoutine(msgs, schedulerMsgs, processorMsgs, ioMsgs) {
+    timer := time.NewTicker(interval)
+    for {
+        select {
+        case <-timer.C:
+            now := evTimeCheck{time.Now()}
+            schedulerMsgs <- now
+            processorMsgs <- now
+            ioMsgs <- now
+        case msg := <-msgs:
+            msg.time = time.Now()
+            // These channels should produce backpressure before
+            // being full to avoid starving each other
+            schedulerMsgs <- msg
+            processorMsgs <- msg
+            ioMsgs <- msg
+            if msg == stop {
+                break;
+            }
+        }
+    }
+}
+
+func processRoutine(input chan Message, output chan Message) {
+    processor := NewProcessor(..)
+    for {
+        msg := <-input
+        switch msg := msg.(type) {
+        case bcBlockRequestMessage:
+            output <- processor.handleBlockRequest(msg)
+            ...
+        case stop:
+            processor.stop()
+            break;
+        }
+    }
+}
+
+func scheduleRoutine(input chan Message, output chan Message) {
+    scheduler := NewScheduler(...)
+    for {
+        msg := <-input
+        switch msg := msg.(type) {
+        case bcBlockResponseMessage:
+            output <- scheduler.handleBlockResponse(msg)
+            ...
+        case stop:
+            scheduler.stop()
+            break;
+        }
+    }
+}
+```
+
+## Lifecycle management
+
+A set of routines for individual processes allows processes to run in parallel with clear lifecycle management. `Start`, `Stop`, and `AddPeer` hooks currently present in the reactor will delegate to the sub-routines, allowing them to manage internal state independently without further coupling to the reactor.
+
+```go
+func (r *BlockchainReactor) Start() {
+    r.msgs := make(chan Message, maxInFlight)
+    schedulerMsgs := make(chan Message)
+    processorMsgs := make(chan Message)
+    ioMsgs := make(chan Message)
+
+    go processRoutine(processorMsgs, r.msgs)
+    go scheduleRoutine(schedulerMsgs, r.msgs)
+    go ioRoutine(ioMsgs, r.msgs)
+    ...
+}
+
+func (r *BlockchainReactor) Receive(...) {
+    ...
+    r.msgs <- msg
+    ...
+}
+
+func (r *BlockchainReactor) Stop() {
+    ...
+    r.msgs <- stop
+    ...
+}
+
+func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
+    ...
+    r.msgs <- bcAddPeerEv{peer.ID}
+    ...
+}
+
+```
+
+## IO handling
+An io handling routine within the reactor will isolate peer communication. Messages going through the ioRoutine will usually be one-way, using `p2p` APIs. In the case in which a `p2p` API such as `trySend` returns an error, the ioRoutine can funnel those messages back to the demuxRoutine for distribution to the other routines. For instance, errors from the ioRoutine can be consumed by the scheduler to inform better peer selection implementations.
+
+```go
+func (r *BlockchainReactor) ioRoutine(ioMsgs chan Message, outMsgs chan Message) {
+    ...
+    for {
+        msg := <-ioMsgs
+        switch msg := msg.(type) {
+        case scBlockRequestMessage:
+            queued := r.sendBlockRequestToPeer(...)
+            if queued {
+                outMsgs <- ioSendQueued{...}
+            }
+        case scStatusRequestMessage:
+            r.sendStatusRequestToPeer(...)
+        case bcPeerError:
+            r.Switch.StopPeerForError(msg.src)
+            ...
+        case bcFinished:
+            break;
+        }
+    }
+}
+
+```
+### Processor Internals
+
+The processor is responsible for ordering, verifying and executing blocks. The Processor will maintain an internal cursor `height` referring to the last processed block.
As blocks arrive unordered, the Processor will check if it has block `height+1`, which is necessary to process the next block. The processor also maintains a map `blockPeers` from heights to peers, to keep track of which peer provided the block at `height`. `blockPeers` can be used in `handleRemovePeer(...)` to reschedule all unprocessed blocks provided by a peer which has errored.
+
+```go
+type Processor struct {
+    height int64 // the height cursor
+    state ...
+    blocks [height]*Block // keep a set of blocks in memory until they are processed
+    blockPeers [height]PeerID // keep track of which heights came from which peerID
+    lastTouch timestamp
+}
+
+func (proc *Processor) handleBlockResponse(peerID, block) {
+    if block.height <= height {
+        // block already processed; ignore it
+    } else if blocks[block.height] {
+        return errDuplicateBlock{}
+    } else {
+        blocks[block.height] = block
+    }
+
+    if blocks[height] && blocks[height+1] {
+        ... = state.Validators.VerifyCommit(...)
+        ... = store.SaveBlock(...)
+        state, err = blockExec.ApplyBlock(...)
+        ...
+        if err == nil {
+            delete blocks[height]
+            height++
+            lastTouch = msg.time
+            return pcBlockProcessed{height-1}
+        } else {
+            ... // Delete all unprocessed blocks from the peer
+            return pcBlockProcessError{peerID, height}
+        }
+    }
+}
+
+func (proc *Processor) handleRemovePeer(peerID) {
+    events = []
+    // Delete all unprocessed blocks from peerID
+    for i = height; i < len(blocks); i++ {
+        if blockPeers[i] == peerID {
+            events = append(events, pcBlockReschedule{i})
+
+            delete blocks[i]
+        }
+    }
+    return events
+}
+
+func handleTimeCheckEv(time) {
+    if time - lastTouch > timeout {
+        // Timeout the processor
+        ...
+    }
+}
+```
+
+## Schedule
+
+The Schedule maintains the internal state used for scheduling blockRequestMessages based on some scheduling algorithm.
The schedule needs to maintain state on:
+
+* The state `blockState` of every block seen up to the height `maxHeight`
+* The set of peers and their peer state `peerState`
+* which peers have which blocks
+* which blocks have been requested from which peers
+
+```go
+type blockState int
+
+const (
+    blockStateNew = iota
+    blockStatePending
+    blockStateReceived
+    blockStateProcessed
+)
+
+type schedule {
+    // a list of blocks and their blockState
+    blockStates map[height]blockState
+
+    // a map of which blocks are available from which peers
+    blockPeers map[height]map[p2p.ID]scPeer
+
+    // a map of peerID to schedule specific peer struct `scPeer`
+    peers map[p2p.ID]scPeer
+
+    // a map of heights to the peer we are waiting for a response from
+    pending map[height]scPeer
+
+    targetPending int // the number of blocks we want in blockStatePending
+    targetReceived int // the number of blocks we want in blockStateReceived
+
+    peerTimeout int
+    peerMinSpeed int
+}
+
+func (sc *schedule) numBlockInState(state blockState) uint32 {
+    num := 0
+    for i := sc.minHeight(); i <= sc.maxHeight(); i++ {
+        if sc.blockStates[i] == state {
+            num++
+        }
+    }
+    return num
+}
+
+
+func (sc *schedule) popSchedule(maxRequest int) []scBlockRequestMessage {
+    // We only want to schedule requests such that we have fewer than sc.targetPending pending blocks
+    // and fewer than sc.targetReceived received-but-unprocessed blocks.
+    // This ensures we don't saturate the network or flood the processor with unprocessed blocks.
+    todo := min(sc.targetPending - sc.numBlockInState(blockStatePending), sc.targetReceived - sc.numBlockInState(blockStateReceived))
+    events := []scBlockRequestMessage{}
+    for i := sc.minHeight(); i < sc.maxHeight(); i++ {
+        if todo == 0 {
+            break
+        }
+        if sc.blockStates[i] == blockStateNew {
+            peer = sc.selectPeer(sc.blockPeers[i])
+            sc.blockStates[i] = blockStatePending
+            sc.pending[i] = peer
+            events = append(events, scBlockRequestMessage{peerID: peer.peerID, height: i})
+            todo--
+        }
+    }
+    return events
+}
+...
+
+type scPeer struct {
+    peerID p2p.ID
+    numOutstandingRequests int
+    lastTouched time.Time
+    monitor flow.Monitor
+}
+
+```
+
+## Scheduler
+The scheduler is configured to maintain a target `n` of in-flight
+messages and will use feedback from `_blockResponseMessage`,
+`_statusResponseMessage` and `_peerError` to produce an optimal assignment
+of scBlockRequestMessage at each `timeCheckEv`.
+
+```
+
+func handleStatusResponse(peerID, height, time) {
+    schedule.touchPeer(peerID, time)
+    schedule.setPeerHeight(peerID, height)
+}
+
+func handleBlockResponseMessage(peerID, height, block, time) {
+    schedule.touchPeer(peerID, time)
+    schedule.markReceived(peerID, height, size(block))
+}
+
+func handleNoBlockResponseMessage(peerID, height, time) {
+    schedule.touchPeer(peerID, time)
+    // reschedule that block, punish peer...
+    ...
+}
+
+func handlePeerError(peerID)  {
+    // Remove the peer, reschedule the requests
+    ...
+}
+
+func handleTimeCheckEv(time) {
+    // clean peer list
+
+    events = []
+    for peerID := range schedule.peersNotTouchedSince(time) {
+        pending = schedule.pendingFrom(peerID)
+        schedule.setPeerState(peerID, timedout)
+        schedule.resetBlocks(pending)
+        events = append(events, peerTimeout{peerID})
+    }
+
+    events = append(events, schedule.popSchedule())
+
+    return events
+}
+```
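+
+`selectPeer` is left unspecified above. A minimal placeholder (a sketch only, assuming the `scPeer` fields above; a real implementation would also weigh peer speed via `flow.Monitor` and recency via `lastTouched`) could simply pick the candidate with the fewest outstanding requests:
+
+```go
+// Sketch: among the peers that advertise the block, pick the one with the
+// fewest outstanding requests so load is spread evenly. Requires "math".
+func (sc *schedule) selectPeer(peers map[p2p.ID]scPeer) scPeer {
+    var best scPeer
+    min := math.MaxInt32
+    for _, p := range peers {
+        if p.numOutstandingRequests < min {
+            min = p.numOutstandingRequests
+            best = p
+        }
+    }
+    return best
+}
+```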
+
+## Peer
+The Peer stores per-peer state based on messages received by the scheduler.
+
+```go
+type Peer struct {
+    lastTouched timestamp
+    lastDownloaded timestamp
+    pending map[height]struct{}
+    height height // max height for the peer
+    state {
+        pending,   // we know the peer but not the height
+        active,    // we know the height
+        timeout    // the peer has timed out
+    }
+}
+```
+
+## Status
+
+Work in progress
+
+## Consequences
+
+### Positive
+
+* Tests become deterministic
+* Simulation becomes a-temporal: no need to wait for a wall-time timeout
+* Peer Selection can be independently tested/simulated
+* Develop a general approach to refactoring reactors
+
+### Negative
+
+### Neutral
+
+### Implementation Path
+
+* Implement the scheduler, test the scheduler, review the scheduler
+* Implement the processor, test the processor, review the processor
+* Implement the demuxer, write integration tests, review integration tests
+
+## References
+
+
+* [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md): The original blockchain reactor re-org proposal
+* [Blockchain re-org](https://github.com/tendermint/tendermint/pull/3561): The current blockchain reactor re-org implementation (v1)
diff --git a/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md b/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
new file mode 100644
index 000000000..066f68f7f
--- /dev/null
+++ b/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
@@ -0,0 +1,141 @@
+# ADR 044: Lite Client with Weak Subjectivity
+
+## Changelog
+* 13-07-2019: Initial draft
+* 14-08-2019: Address cwgoes comments
+
+## Context
+
+The concept of light clients was introduced in the Bitcoin white paper. It
+describes a watcher of a distributed consensus process that only validates the
+consensus algorithm and not the state machine transactions within.
+
+Tendermint light clients allow bandwidth- & compute-constrained devices, such as
+smartphones, low-power embedded chips, or other blockchains, to
+efficiently verify the consensus of a Tendermint blockchain. This forms the
+basis of safe and efficient state synchronization for new network nodes and
+inter-blockchain communication (where a light client of one Tendermint instance
+runs in another chain's state machine).
+
+In a network that is expected to reliably punish validators for misbehavior
+by slashing bonded stake and where the validator set changes
+infrequently, clients can take advantage of this assumption to safely
+synchronize a lite client without downloading the intervening headers.
+
+Light clients (and full nodes) operating in the Proof of Stake context need a
+trusted block height from a trusted source that is no older than 1 unbonding
+window plus a configurable evidence submission synchrony bound. This is called “weak subjectivity”.
+
+Weak subjectivity is required in Proof of Stake blockchains because it is
+costless for an attacker to buy up voting keys that are no longer bonded and
+fork the network at some point in its prior history. See Vitalik’s post at
+[Proof of Stake: How I Learned to Love Weak
+Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/).
+
+Currently, Tendermint provides a lite client implementation in the
+[lite](https://github.com/tendermint/tendermint/tree/master/lite) package. This
+lite client implements a bisection algorithm that tries to use a binary search
+to find the minimum number of block headers where the validator set voting
+power changes are less than 1/3.
This interface does not support weak
+subjectivity at this time. The Cosmos SDK also does not support counterfactual
+slashing, nor does the lite client have any capacity to report evidence, making
+these systems *theoretically unsafe*.
+
+NOTE: Tendermint provides a somewhat different (stronger) light client model
+than Bitcoin under eclipse, since the eclipsing node(s) can only fool the light
+client if they have two-thirds of the private keys from the last root-of-trust.
+
+## Decision
+
+### The Weak Subjectivity Interface
+
+Add the weak subjectivity interface for when a new light client connects to the
+network or when a light client that has been offline for longer than the
+unbonding period connects to the network. Specifically, the node needs to
+initialize the following structure before syncing from user input:
+
+```
+type TrustOptions struct {
+    // Required: only trust commits up to this old.
+    // Should be equal to the unbonding period minus some delta for evidence reporting.
+    TrustPeriod time.Duration `json:"trust-period"`
+
+    // Option 1: TrustHeight and TrustHash can both be provided
+    // to force the trusting of a particular height and hash.
+    // If the latest trusted height/hash is more recent, then this option is
+    // ignored.
+    TrustHeight int64  `json:"trust-height"`
+    TrustHash   []byte `json:"trust-hash"`
+
+    // Option 2: Callback can be set to implement a confirmation
+    // step if the trust store is uninitialized, or expired.
+    Callback func(height int64, hash []byte) error
+}
+```
+
+The expectation is that the user will get this information from a trusted source
+like a validator, a friend, or a secure website. A more user-friendly
+solution with trust tradeoffs is that we establish an HTTPS-based protocol with
+a default endpoint that populates this information. Also, an on-chain registry
+of roots-of-trust (e.g. on the Cosmos Hub) seems likely in the future.
+
+### Linear Verification
+
+The linear verification algorithm requires downloading all headers
+between the `TrustHeight` and the `LatestHeight`. The lite client downloads the
+full header for the provided `TrustHeight` and then proceeds to download each of the
+`N+1` subsequent headers (where `N` is the number of blocks between `TrustHeight`
+and `LatestHeight`), applying the [Tendermint validation
+rules](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#validation)
+to each block.
+
+### Bisecting Verification
+
+Bisecting verification is a more bandwidth- and compute-efficient mechanism that
+in the most optimistic case requires a light client to download only two block
+headers to come into synchronization.
+
+The bisection algorithm proceeds in the following fashion. The client downloads
+and verifies the full block header for `TrustHeight` and then fetches the
+`LatestHeight` block header. The client then attempts to verify the `LatestHeight`
+header with the voting powers taken from the `NextValidatorSet` in the `TrustHeight`
+header. This verification will succeed if the validators from `TrustHeight` still
+have more than 2/3 of the voting power at `LatestHeight`. If this succeeds, the
+client is fully synchronized. If it fails, then the following bisection algorithm
+should be executed.
+
+The client tries to download the block header at the mid-point height between
+`LatestHeight` and `TrustHeight` and attempts the same algorithm as above,
+using `MidPointHeight` instead of `LatestHeight` and a different threshold -
+1/3 +1 of voting power for *non-adjacent headers*. In the case of failure,
+recursively perform the `MidPoint` verification until success, then start over
+with an updated `NextValidatorSet` and `TrustHeight`. A sketch of this recursion
+follows below.
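+
+For illustration, the recursion can be sketched as follows (pseudocode in Go syntax; `verifyNonAdjacent`, `verifyAdjacent` and `fetchHeader` are assumed helpers implementing the thresholds described above):
+
+```go
+// Sketch of bisecting verification: trusted is already verified,
+// target is the header we want to trust.
+func bisect(trusted, target Header) error {
+    // Optimistic case: enough of trusted's NextValidatorSet signed target.
+    if err := verifyNonAdjacent(trusted, target); err == nil {
+        return nil
+    }
+    if target.Height == trusted.Height+1 {
+        // Adjacent headers chain validator sets directly.
+        return verifyAdjacent(trusted, target)
+    }
+    mid := fetchHeader((trusted.Height + target.Height) / 2)
+    if err := bisect(trusted, mid); err != nil { // trust the midpoint first
+        return err
+    }
+    return bisect(mid, target) // then continue from the new root of trust
+}
+```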
+
+If the client encounters a forged header, it should submit the header along
+with some other intermediate headers as evidence of misbehavior to other
+full nodes. After that, it can retry the bisection using another full node. An
+optimal client will cache trusted headers from the previous run to minimize
+network usage.
+
+---
+
+Check out the formal specification
+[here](https://github.com/tendermint/tendermint/blob/master/docs/spec/consensus/light-client.md).
+
+## Status
+
+Accepted.
+
+## Consequences
+
+### Positive
+
+* light client which is safe to use (it can go offline, but not for too long)
+
+### Negative
+
+* complexity of bisection
+
+### Neutral
+
+* social consensus can be prone to errors (for cases where a new light client
+  joins a network or it has been offline for too long)
diff --git a/docs/architecture/img/bc-reactor-refactor.png b/docs/architecture/img/bc-reactor-refactor.png
new file mode 100644
index 000000000..4cd84a02f
Binary files /dev/null and b/docs/architecture/img/bc-reactor-refactor.png differ
diff --git a/docs/architecture/img/bc-reactor.png b/docs/architecture/img/bc-reactor.png
new file mode 100644
index 000000000..f7fe0f819
Binary files /dev/null and b/docs/architecture/img/bc-reactor.png differ
diff --git a/docs/architecture/img/blockchain-reactor-v1.png b/docs/architecture/img/blockchain-reactor-v1.png
new file mode 100644
index 000000000..70debcd66
Binary files /dev/null and b/docs/architecture/img/blockchain-reactor-v1.png differ
diff --git a/docs/architecture/img/blockchain-reactor-v2.png b/docs/architecture/img/blockchain-reactor-v2.png
new file mode 100644
index 000000000..086bf71bd
Binary files /dev/null and b/docs/architecture/img/blockchain-reactor-v2.png differ
diff --git a/docs/architecture/img/state-sync.png b/docs/architecture/img/state-sync.png
new file mode 100644
index 000000000..08b6eac43
Binary files /dev/null and b/docs/architecture/img/state-sync.png differ
diff --git a/docs/guides/go-built-in.md b/docs/guides/go-built-in.md
new file mode 100644
index 000000000..705022c90
--- /dev/null
+++ b/docs/guides/go-built-in.md
@@ -0,0 +1,639 @@
+# Creating a built-in application in Go
+
+## Guide assumptions
+
+This guide is designed for beginners who want to get started with a Tendermint
+Core application from scratch. It does not assume that you have any prior
+experience with Tendermint Core.
+
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine - written in any programming language - and securely
+replicates it on many machines.
+
+Although Tendermint Core is written in the Golang programming language, prior
+knowledge of it is not required for this guide. You can learn it as we go due
+to its simplicity. However, you may want to go through [Learn X in Y minutes
+Where X=Go](https://learnxinyminutes.com/docs/go/) first to familiarize
+yourself with the syntax.
+
+By following along with this guide, you'll create a Tendermint Core project
+called kvstore, a (very) simple distributed BFT key-value store.
+
+## Built-in app vs external app
+
+Running your application inside the same process as Tendermint Core will give
+you the best possible performance.
+
+For other languages, your application has to communicate with Tendermint Core
+over TCP, a Unix domain socket, or gRPC.
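+
+For contrast - this is not used in this guide - an external app would serve the same `Application` over a socket, roughly like this (a sketch using the `abci/server` package; the socket path is made up):
+
+```go
+// Sketch of the external-app alternative: serve ABCI over a Unix socket
+// instead of running in-process with Tendermint Core.
+srv, err := server.NewServer("unix://kvstore.sock", "socket", app)
+if err != nil {
+    log.Fatal(err)
+}
+if err := srv.Start(); err != nil {
+    log.Fatal(err)
+}
+defer srv.Stop()
+```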
+ +## 1.1 Installing Go + +Please refer to [the official guide for installing +Go](https://golang.org/doc/install). + +Verify that you have the latest version of Go installed: + +```sh +$ go version +go version go1.12.7 darwin/amd64 +``` + +Make sure you have `$GOPATH` environment variable set: + +```sh +$ echo $GOPATH +/Users/melekes/go +``` + +## 1.2 Creating a new Go project + +We'll start by creating a new Go project. + +```sh +$ mkdir -p $GOPATH/src/github.com/me/kvstore +$ cd $GOPATH/src/github.com/me/kvstore +``` + +Inside the example directory create a `main.go` file with the following content: + +```go +package main + +import ( + "fmt" +) + +func main() { + fmt.Println("Hello, Tendermint Core") +} +``` + +When run, this should print "Hello, Tendermint Core" to the standard output. + +```sh +$ go run main.go +Hello, Tendermint Core +``` + +## 1.3 Writing a Tendermint Core application + +Tendermint Core communicates with the application through the Application +BlockChain Interface (ABCI). All message types are defined in the [protobuf +file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). +This allows Tendermint Core to run applications written in any programming +language. + +Create a file called `app.go` with the following content: + +```go +package main + +import ( + abcitypes "github.com/tendermint/tendermint/abci/types" +) + +type KVStoreApplication struct {} + +var _ abcitypes.Application = (*KVStoreApplication)(nil) + +func NewKVStoreApplication() *KVStoreApplication { + return &KVStoreApplication{} +} + +func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo { + return abcitypes.ResponseInfo{} +} + +func (KVStoreApplication) SetOption(req abcitypes.RequestSetOption) abcitypes.ResponseSetOption { + return abcitypes.ResponseSetOption{} +} + +func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx { + return abcitypes.ResponseDeliverTx{Code: 0} +} + +func (KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx { + return abcitypes.ResponseCheckTx{Code: 0} +} + +func (KVStoreApplication) Commit() abcitypes.ResponseCommit { + return abcitypes.ResponseCommit{} +} + +func (KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery { + return abcitypes.ResponseQuery{Code: 0} +} + +func (KVStoreApplication) InitChain(req abcitypes.RequestInitChain) abcitypes.ResponseInitChain { + return abcitypes.ResponseInitChain{} +} + +func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock { + return abcitypes.ResponseBeginBlock{} +} + +func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock { + return abcitypes.ResponseEndBlock{} +} +``` + +Now I will go through each method explaining when it's called and adding +required business logic. + +### 1.3.1 CheckTx + +When a new transaction is added to the Tendermint Core, it will ask the +application to check it (validate the format, signatures, etc.). 
+
+```go
+func (app *KVStoreApplication) isValid(tx []byte) (code uint32) {
+	// check format
+	parts := bytes.Split(tx, []byte("="))
+	if len(parts) != 2 {
+		return 1
+	}
+
+	key, value := parts[0], parts[1]
+
+	// check if the same key=value already exists
+	err := app.db.View(func(txn *badger.Txn) error {
+		item, err := txn.Get(key)
+		if err != nil && err != badger.ErrKeyNotFound {
+			return err
+		}
+		if err == nil {
+			return item.Value(func(val []byte) error {
+				if bytes.Equal(val, value) {
+					code = 2
+				}
+				return nil
+			})
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	return code
+}
+
+func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
+	code := app.isValid(req.Tx)
+	return abcitypes.ResponseCheckTx{Code: code, GasWanted: 1}
+}
+```
+
+Don't worry if this does not compile yet.
+
+If the transaction does not have the form `{bytes}={bytes}`, we return code `1`.
+When the same key=value already exists (same key and value), we return code `2`.
+For others, we return a zero code indicating that they are valid.
+
+Note that anything with a non-zero code will be considered invalid (`-1`, `100`,
+etc.) by Tendermint Core.
+
+Valid transactions will eventually be committed given they are not too big and
+have enough gas. To learn more about gas, check out ["the
+specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
+
+For the underlying key-value store we'll use
+[badger](https://github.com/dgraph-io/badger), which is an embeddable,
+persistent and fast key-value (KV) database.
+
+```go
+import "github.com/dgraph-io/badger"
+
+type KVStoreApplication struct {
+	db           *badger.DB
+	currentBatch *badger.Txn
+}
+
+func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
+	return &KVStoreApplication{
+		db: db,
+	}
+}
+```
+
+### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
+
+When Tendermint Core has decided on the block, it's transferred to the
+application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
+`EndBlock` at the end. The `DeliverTx` messages are transferred asynchronously,
+but the responses are expected to come in order.
+
+```
+func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
+	app.currentBatch = app.db.NewTransaction(true)
+	return abcitypes.ResponseBeginBlock{}
+}
+
+```
+
+Here we create a batch, which will store the block's transactions.
+
+```go
+func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
+	code := app.isValid(req.Tx)
+	if code != 0 {
+		return abcitypes.ResponseDeliverTx{Code: code}
+	}
+
+	parts := bytes.Split(req.Tx, []byte("="))
+	key, value := parts[0], parts[1]
+
+	err := app.currentBatch.Set(key, value)
+	if err != nil {
+		panic(err)
+	}
+
+	return abcitypes.ResponseDeliverTx{Code: 0}
+}
+```
+
+If the transaction is badly formatted or the same key=value already exists, we
+again return a non-zero code. Otherwise, we add it to the current batch.
+
+In the current design, a block can include incorrect transactions (those that
+passed CheckTx but failed DeliverTx, or transactions included by the proposer
+directly). This is done for performance reasons.
+
+Note that we can't commit transactions inside `DeliverTx` because in such a case
+`Query`, which may be called in parallel, would return inconsistent data (i.e.
+it would report that some value already exists even when the actual block has
+not yet been committed).
+
+`Commit` instructs the application to persist the new state.
+
+```go
+func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
+	app.currentBatch.Commit()
+	return abcitypes.ResponseCommit{Data: []byte{}}
+}
+```
+
+### 1.3.3 Query
+
+Now, when the client wants to know whether a particular key/value pair exists,
+it will call the Tendermint Core RPC `/abci_query` endpoint, which in turn
+will call the application's `Query` method.
+
+Applications are free to provide their own APIs. But by using Tendermint Core
+as a proxy, clients (including [light client
+package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
+the unified API across different applications. Plus they won't have to call the
+otherwise separate Tendermint Core API for additional proofs.
+
+Note we don't include a proof here.
+
+```go
+func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery abcitypes.ResponseQuery) {
+	resQuery.Key = reqQuery.Data
+	err := app.db.View(func(txn *badger.Txn) error {
+		item, err := txn.Get(reqQuery.Data)
+		if err != nil && err != badger.ErrKeyNotFound {
+			return err
+		}
+		if err == badger.ErrKeyNotFound {
+			resQuery.Log = "does not exist"
+		} else {
+			return item.Value(func(val []byte) error {
+				resQuery.Log = "exists"
+				resQuery.Value = val
+				return nil
+			})
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+```
+
+The complete specification can be found
+[here](https://tendermint.com/docs/spec/abci/).
+
+## 1.4 Starting an application and a Tendermint Core instance in the same process
+
+Put the following code into the "main.go" file:
+
+```go
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"os/signal"
+	"path/filepath"
+	"syscall"
+
+	"github.com/dgraph-io/badger"
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	cfg "github.com/tendermint/tendermint/config"
+	tmflags "github.com/tendermint/tendermint/libs/cli/flags"
+	"github.com/tendermint/tendermint/libs/log"
+	nm "github.com/tendermint/tendermint/node"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/privval"
+	"github.com/tendermint/tendermint/proxy"
+)
+
+var configFile string
+
+func init() {
+	flag.StringVar(&configFile, "config", "$HOME/.tendermint/config/config.toml", "Path to config.toml")
+}
+
+func main() {
+	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
+		os.Exit(1)
+	}
+	defer db.Close()
+	app := NewKVStoreApplication(db)
+
+	flag.Parse()
+
+	node, err := newTendermint(app, configFile)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "%v", err)
+		os.Exit(2)
+	}
+
+	node.Start()
+	defer func() {
+		node.Stop()
+		node.Wait()
+	}()
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+	<-c
+	os.Exit(0)
+}
+
+func newTendermint(app abci.Application, configFile string) (*nm.Node, error) {
+	// read config
+	config := cfg.DefaultConfig()
+	config.RootDir = filepath.Dir(filepath.Dir(configFile))
+	viper.SetConfigFile(configFile)
+	if err := viper.ReadInConfig(); err != nil {
+		return nil, errors.Wrap(err, "viper failed to read config file")
+	}
+	if err := viper.Unmarshal(config); err != nil {
+		return nil, errors.Wrap(err, "viper failed to unmarshal config")
+	}
+	if err := config.ValidateBasic(); err != nil {
+		return nil, errors.Wrap(err, "config is invalid")
+	}
+
+	// create logger
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+	var err error
+	logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse log level")
+	}
+
+	// read private validator
+	pv := privval.LoadFilePV(
+		config.PrivValidatorKeyFile(),
+		config.PrivValidatorStateFile(),
+	)
+
+	// read node key
+	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to load node's key")
+	}
+
+	// create node
+	node, err := nm.NewNode(
+		config,
+		pv,
+		nodeKey,
+		proxy.NewLocalClientCreator(app),
+		nm.DefaultGenesisDocProviderFunc(config),
+		nm.DefaultDBProvider,
+		nm.DefaultMetricsProvider(config.Instrumentation),
+		logger)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create new Tendermint node")
+	}
+
+	return node, nil
+}
+```
+
+This is a huge blob of code, so let's break it down into pieces.
+
+First, we initialize the Badger database and create an app instance:
+
+```go
+db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
+if err != nil {
+	fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
+	os.Exit(1)
+}
+defer db.Close()
+app := NewKVStoreApplication(db)
+```
+
+Then we use it to create a Tendermint Core `Node` instance:
+
+```go
+flag.Parse()
+
+node, err := newTendermint(app, configFile)
+if err != nil {
+	fmt.Fprintf(os.Stderr, "%v", err)
+	os.Exit(2)
+}
+
+...
+
+// create node
+node, err := nm.NewNode(
+	config,
+	pv,
+	nodeKey,
+	proxy.NewLocalClientCreator(app),
+	nm.DefaultGenesisDocProviderFunc(config),
+	nm.DefaultDBProvider,
+	nm.DefaultMetricsProvider(config.Instrumentation),
+	logger)
+if err != nil {
+	return nil, errors.Wrap(err, "failed to create new Tendermint node")
+}
+```
+
+`NewNode` requires a configuration file, a private validator, a node key and a
+few other things in order to construct the full node.
+
+Note we use `proxy.NewLocalClientCreator` here to create a local client instead
+of one communicating through a socket or gRPC.
+
+We use [viper](https://github.com/spf13/viper) to read the config, which we
+will generate later using the `tendermint init` command.
+
+```go
+config := cfg.DefaultConfig()
+config.RootDir = filepath.Dir(filepath.Dir(configFile))
+viper.SetConfigFile(configFile)
+if err := viper.ReadInConfig(); err != nil {
+	return nil, errors.Wrap(err, "viper failed to read config file")
+}
+if err := viper.Unmarshal(config); err != nil {
+	return nil, errors.Wrap(err, "viper failed to unmarshal config")
+}
+if err := config.ValidateBasic(); err != nil {
+	return nil, errors.Wrap(err, "config is invalid")
+}
+```
+
+We use `FilePV`, which is a private validator (i.e. the component that signs
+consensus messages). Normally, you would use `SignerRemote` to connect to an
+external [HSM](https://kb.certus.one/hsm.html).
+
+```go
+pv := privval.LoadFilePV(
+	config.PrivValidatorKeyFile(),
+	config.PrivValidatorStateFile(),
+)
+```
+
+`nodeKey` is needed to identify the node in a p2p network.
+
+```go
+nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
+if err != nil {
+	return nil, errors.Wrap(err, "failed to load node's key")
+}
+```
+
+As for the logger, we use the built-in library, which provides a nice
+abstraction over [go-kit's
+logger](https://github.com/go-kit/kit/tree/master/log).
+
+```go
+logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+var err error
+logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
+if err != nil {
+	return nil, errors.Wrap(err, "failed to parse log level")
+}
+```
+
+Finally, we start the node and add some signal handling to gracefully stop it
+upon receiving SIGTERM or Ctrl-C.
+
+```go
+node.Start()
+defer func() {
+	node.Stop()
+	node.Wait()
+}()
+
+c := make(chan os.Signal, 1)
+signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+<-c
+os.Exit(0)
+```
+
+## 1.5 Getting Up and Running
+
+We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
+dependency management.
+
+```sh
+$ export GO111MODULE=on
+$ go mod init github.com/me/example
+$ go build
+```
+
+This should build the binary.
+
+To create the default configuration, node key and private validator files,
+let's execute `tendermint init`. But before we do that, we will need to
+install Tendermint Core.
+
+```sh
+$ rm -rf /tmp/example
+$ cd $GOPATH/src/github.com/tendermint/tendermint
+$ make install
+$ TMHOME="/tmp/example" tendermint init
+
+I[2019-07-16|18:40:36.480] Generated private validator    module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example/data/priv_validator_state.json
+I[2019-07-16|18:40:36.481] Generated node key             module=main path=/tmp/example/config/node_key.json
+I[2019-07-16|18:40:36.482] Generated genesis file         module=main path=/tmp/example/config/genesis.json
+```
+
+We are ready to start our application:
+
+```sh
+$ ./example -config "/tmp/example/config/config.toml"
+
+badger 2019/07/16 18:42:25 INFO: All 0 tables opened in 0s
+badger 2019/07/16 18:42:25 INFO: Replaying file id: 0 at offset: 0
+badger 2019/07/16 18:42:25 INFO: Replay took: 695.227µs
+E[2019-07-16|18:42:25.818] Couldn't connect to any seeds  module=p2p
+I[2019-07-16|18:42:26.853] Executed block                 module=state height=1 validTxs=0 invalidTxs=0
+I[2019-07-16|18:42:26.865] Committed state                module=state height=1 txs=0 appHash=
+```
+
+Now open another tab in your terminal and try sending a transaction:
+
+```sh
+$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "check_tx": {
+      "gasWanted": "1"
+    },
+    "deliver_tx": {},
+    "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6",
+    "height": "128"
+  }
+}
+```
+
+The response should contain the height at which this transaction was
+committed.
+
+Now let's check if the given key exists and retrieve its value:
+
+```sh
+$ curl -s 'localhost:26657/abci_query?data="tendermint"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "response": {
+      "log": "exists",
+      "key": "dGVuZGVybWludA==",
+      "value": "cm9ja3M="
+    }
+  }
+}
+```
+
+"dGVuZGVybWludA==" and "cm9ja3M=" are the base64 encodings of "tendermint"
+and "rocks" respectively.
+
+## Outro
+
+I hope everything went smoothly and your first, but hopefully not the last,
+Tendermint Core application is up and running. If not, please [open an issue on
+Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
+deeper, read [the docs](https://tendermint.com/docs/).
diff --git a/docs/guides/go.md b/docs/guides/go.md
new file mode 100644
index 000000000..ada84adfc
--- /dev/null
+++ b/docs/guides/go.md
@@ -0,0 +1,523 @@
+# Creating an application in Go
+
+## Guide Assumptions
+
+This guide is designed for beginners who want to get started with a Tendermint
+Core application from scratch. It does not assume that you have any prior
+experience with Tendermint Core.
+
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine - written in any programming language - and securely
+replicates it on many machines.
+
+Although Tendermint Core is written in Go, prior knowledge of it is not
+required for this guide. You can learn it as we go due to its simplicity.
+However, you may want to go through [Learn X in Y minutes Where
+X=Go](https://learnxinyminutes.com/docs/go/) first to familiarize yourself
+with the syntax.
+
+By following along with this guide, you'll create a Tendermint Core project
+called kvstore, a (very) simple distributed BFT key-value store.
+
+## Built-in app vs external app
+
+To get maximum performance it is better to run your application in the same
+process as Tendermint Core. [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
+is written this way. Please refer to the [Writing a built-in Tendermint Core
+application in Go](./go-built-in.md) guide for details.
+
+Having a separate application might give you better security guarantees, as
+the two processes would be communicating via an established binary protocol.
+Tendermint Core will not have access to the application's state.
+
+## 1.1 Installing Go
+
+Please refer to [the official guide for installing
+Go](https://golang.org/doc/install).
+
+Verify that you have the latest version of Go installed:
+
+```sh
+$ go version
+go version go1.12.7 darwin/amd64
+```
+
+Make sure the `$GOPATH` environment variable is set:
+
+```sh
+$ echo $GOPATH
+/Users/melekes/go
+```
+
+## 1.2 Creating a new Go project
+
+We'll start by creating a new Go project.
+
+```sh
+$ mkdir -p $GOPATH/src/github.com/me/kvstore
+$ cd $GOPATH/src/github.com/me/kvstore
+```
+
+Inside the example directory, create a `main.go` file with the following content:
+
+```go
+package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	fmt.Println("Hello, Tendermint Core")
+}
+```
+
+When run, this should print "Hello, Tendermint Core" to the standard output.
+
+```sh
+$ go run main.go
+Hello, Tendermint Core
+```
+
+## 1.3 Writing a Tendermint Core application
+
+Tendermint Core communicates with the application through the Application
+BlockChain Interface (ABCI). All message types are defined in the [protobuf
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+This allows Tendermint Core to run applications written in any programming
+language.
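+
+Before implementing anything, it may help to see the shape of the interface we
+are about to satisfy. The sketch below is for orientation only and simply
+restates the nine methods implemented in this guide; the authoritative
+definition (including the connection grouping) lives in the `abcitypes`
+package (github.com/tendermint/tendermint/abci/types):
+
+```go
+// Sketch of the ABCI Application interface, for orientation only; see the
+// abcitypes package for the authoritative definition.
+type Application interface {
+	// Info/Query connection
+	Info(RequestInfo) ResponseInfo
+	SetOption(RequestSetOption) ResponseSetOption
+	Query(RequestQuery) ResponseQuery
+
+	// Mempool connection
+	CheckTx(RequestCheckTx) ResponseCheckTx
+
+	// Consensus connection
+	InitChain(RequestInitChain) ResponseInitChain
+	BeginBlock(RequestBeginBlock) ResponseBeginBlock
+	DeliverTx(RequestDeliverTx) ResponseDeliverTx
+	EndBlock(RequestEndBlock) ResponseEndBlock
+	Commit() ResponseCommit
+}
+```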
+
+Create a file called `app.go` with the following content:
+
+```go
+package main
+
+import (
+	abcitypes "github.com/tendermint/tendermint/abci/types"
+)
+
+type KVStoreApplication struct {}
+
+var _ abcitypes.Application = (*KVStoreApplication)(nil)
+
+func NewKVStoreApplication() *KVStoreApplication {
+	return &KVStoreApplication{}
+}
+
+func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo {
+	return abcitypes.ResponseInfo{}
+}
+
+func (KVStoreApplication) SetOption(req abcitypes.RequestSetOption) abcitypes.ResponseSetOption {
+	return abcitypes.ResponseSetOption{}
+}
+
+func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
+	return abcitypes.ResponseDeliverTx{Code: 0}
+}
+
+func (KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
+	return abcitypes.ResponseCheckTx{Code: 0}
+}
+
+func (KVStoreApplication) Commit() abcitypes.ResponseCommit {
+	return abcitypes.ResponseCommit{}
+}
+
+func (KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery {
+	return abcitypes.ResponseQuery{Code: 0}
+}
+
+func (KVStoreApplication) InitChain(req abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
+	return abcitypes.ResponseInitChain{}
+}
+
+func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
+	return abcitypes.ResponseBeginBlock{}
+}
+
+func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
+	return abcitypes.ResponseEndBlock{}
+}
+```
+
+Now I will go through each method, explaining when it's called and adding the
+required business logic.
+
+### 1.3.1 CheckTx
+
+When a new transaction is added to Tendermint Core, it will ask the
+application to check it (validate the format, signatures, etc.).
+
+```go
+func (app *KVStoreApplication) isValid(tx []byte) (code uint32) {
+	// check format
+	parts := bytes.Split(tx, []byte("="))
+	if len(parts) != 2 {
+		return 1
+	}
+
+	key, value := parts[0], parts[1]
+
+	// check if the same key=value already exists
+	err := app.db.View(func(txn *badger.Txn) error {
+		item, err := txn.Get(key)
+		if err != nil && err != badger.ErrKeyNotFound {
+			return err
+		}
+		if err == nil {
+			return item.Value(func(val []byte) error {
+				if bytes.Equal(val, value) {
+					code = 2
+				}
+				return nil
+			})
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	return code
+}
+
+func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
+	code := app.isValid(req.Tx)
+	return abcitypes.ResponseCheckTx{Code: code, GasWanted: 1}
+}
+```
+
+Don't worry if this does not compile yet.
+
+If the transaction does not have the form `{bytes}={bytes}`, we return code
+`1`. When the same key=value pair already exists (same key and value), we
+return code `2`. For all others, we return a zero code, indicating that they
+are valid.
+
+Note that anything with a non-zero code (`-1`, `100`, etc.) will be considered
+invalid by Tendermint Core.
+
+Valid transactions will eventually be committed, provided they are not too big
+and have enough gas. To learn more about gas, check out ["the
+specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
+
+For the underlying key-value store we'll use
+[badger](https://github.com/dgraph-io/badger), which is an embeddable,
+persistent and fast key-value (KV) database.
+
+```go
+import "github.com/dgraph-io/badger"
+
+type KVStoreApplication struct {
+	db           *badger.DB
+	currentBatch *badger.Txn
+}
+
+func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
+	return &KVStoreApplication{
+		db: db,
+	}
+}
+```
+
+### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
+
+When Tendermint Core has decided on the block, it's transferred to the
+application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
+`EndBlock` at the end. `DeliverTx` messages are transferred asynchronously,
+but the responses are expected to come in order.
+
+```go
+func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
+	app.currentBatch = app.db.NewTransaction(true)
+	return abcitypes.ResponseBeginBlock{}
+}
+```
+
+Here we create a batch, which will store the block's transactions.
+
+```go
+func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
+	code := app.isValid(req.Tx)
+	if code != 0 {
+		return abcitypes.ResponseDeliverTx{Code: code}
+	}
+
+	parts := bytes.Split(req.Tx, []byte("="))
+	key, value := parts[0], parts[1]
+
+	err := app.currentBatch.Set(key, value)
+	if err != nil {
+		panic(err)
+	}
+
+	return abcitypes.ResponseDeliverTx{Code: 0}
+}
+```
+
+If the transaction is badly formatted or the same key=value pair already
+exists, we again return a non-zero code. Otherwise, we add it to the current
+batch.
+
+In the current design, a block can include incorrect transactions (those that
+passed CheckTx but failed DeliverTx, or transactions included by the proposer
+directly). This is done for performance reasons.
+
+Note we can't commit transactions inside `DeliverTx` because in that case
+`Query`, which may be called in parallel, would return inconsistent data (i.e.
+it would report that some value already exists even though the actual block
+has not yet been committed).
+
+`Commit` instructs the application to persist the new state.
+
+```go
+func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
+	app.currentBatch.Commit()
+	return abcitypes.ResponseCommit{Data: []byte{}}
+}
+```
+
+### 1.3.3 Query
+
+Now, when the client wants to know whether a particular key/value pair exists,
+it will call the Tendermint Core RPC `/abci_query` endpoint, which in turn
+will call the application's `Query` method.
+
+Applications are free to provide their own APIs. But by using Tendermint Core
+as a proxy, clients (including [light client
+package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
+the unified API across different applications. Plus they won't have to call the
+otherwise separate Tendermint Core API for additional proofs.
+
+Note we don't include a proof here.
+
+```go
+func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery abcitypes.ResponseQuery) {
+	resQuery.Key = reqQuery.Data
+	err := app.db.View(func(txn *badger.Txn) error {
+		item, err := txn.Get(reqQuery.Data)
+		if err != nil && err != badger.ErrKeyNotFound {
+			return err
+		}
+		if err == badger.ErrKeyNotFound {
+			resQuery.Log = "does not exist"
+		} else {
+			return item.Value(func(val []byte) error {
+				resQuery.Log = "exists"
+				resQuery.Value = val
+				return nil
+			})
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+```
+
+The complete specification can be found
+[here](https://tendermint.com/docs/spec/abci/).
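+
+For completeness, here is a sketch of how a Go client might exercise this
+`Query` method through the Tendermint Core RPC using the `rpc/client`
+package. This is a hedged example: the constructor and method signatures
+shown (`NewHTTP`, `ABCIQuery`) are assumed from the Tendermint version used
+in this guide and may differ in other releases.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	rpcclient "github.com/tendermint/tendermint/rpc/client"
+)
+
+func main() {
+	// Assumes a node is listening on the default RPC address.
+	// Equivalent to: curl 'localhost:26657/abci_query?data="tendermint"'
+	client := rpcclient.NewHTTP("tcp://127.0.0.1:26657", "/websocket")
+
+	res, err := client.ABCIQuery("", []byte("tendermint"))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("log: %s, value: %s\n", res.Response.Log, res.Response.Value)
+}
+```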
+
+## 1.4 Starting an application and a Tendermint Core instance
+
+Put the following code into the "main.go" file:
+
+```go
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/dgraph-io/badger"
+
+	abciserver "github.com/tendermint/tendermint/abci/server"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+var socketAddr string
+
+func init() {
+	flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address")
+}
+
+func main() {
+	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
+		os.Exit(1)
+	}
+	defer db.Close()
+	app := NewKVStoreApplication(db)
+
+	flag.Parse()
+
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+
+	server := abciserver.NewSocketServer(socketAddr, app)
+	server.SetLogger(logger)
+	if err := server.Start(); err != nil {
+		fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
+		os.Exit(1)
+	}
+	defer server.Stop()
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+	<-c
+	os.Exit(0)
+}
+```
+
+This is a huge blob of code, so let's break it down into pieces.
+
+First, we initialize the Badger database and create an app instance:
+
+```go
+db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
+if err != nil {
+	fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
+	os.Exit(1)
+}
+defer db.Close()
+app := NewKVStoreApplication(db)
+```
+
+Then we start the ABCI server and add some signal handling to gracefully stop
+it upon receiving SIGTERM or Ctrl-C. Tendermint Core will act as a client,
+which connects to our server and sends us transactions and other messages.
+
+```go
+server := abciserver.NewSocketServer(socketAddr, app)
+server.SetLogger(logger)
+if err := server.Start(); err != nil {
+	fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
+	os.Exit(1)
+}
+defer server.Stop()
+
+c := make(chan os.Signal, 1)
+signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+<-c
+os.Exit(0)
+```
+
+## 1.5 Getting Up and Running
+
+We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
+dependency management.
+
+```sh
+$ export GO111MODULE=on
+$ go mod init github.com/me/example
+$ go build
+```
+
+This should build the binary.
+
+To create the default configuration, node key and private validator files,
+let's execute `tendermint init`. But before we do that, we will need to
+install Tendermint Core.
+
+```sh
+$ rm -rf /tmp/example
+$ cd $GOPATH/src/github.com/tendermint/tendermint
+$ make install
+$ TMHOME="/tmp/example" tendermint init
+
+I[2019-07-16|18:20:36.480] Generated private validator    module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example/data/priv_validator_state.json
+I[2019-07-16|18:20:36.481] Generated node key             module=main path=/tmp/example/config/node_key.json
+I[2019-07-16|18:20:36.482] Generated genesis file         module=main path=/tmp/example/config/genesis.json
+```
+
+Feel free to explore the generated files, which can be found in the
+`/tmp/example/config` directory. Documentation on the config can be found
+[here](https://tendermint.com/docs/tendermint-core/configuration.html).
+
+We are ready to start our application:
+
+```sh
+$ rm example.sock
+$ ./example
+
+badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s
+badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0
+badger 2019/07/16 18:25:11 INFO: Replay took: 300.4µs
+I[2019-07-16|18:25:11.523] Starting ABCIServer            impl=ABCIServ
+```
+
+Then we need to start Tendermint Core and point it to our application. Staying
+within the application directory, execute:
+
+```sh
+$ TMHOME="/tmp/example" tendermint node --proxy_app=unix://example.sock
+
+I[2019-07-16|18:26:20.362] Version info                   module=main software=0.32.1 block=10 p2p=7
+I[2019-07-16|18:26:20.383] Starting Node                  module=main impl=Node
+E[2019-07-16|18:26:20.392] Couldn't connect to any seeds  module=p2p
+I[2019-07-16|18:26:20.394] Started node                   module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:8dab80770ae8e295d4ce905d86af78c4ff634b79 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-nIO96P Version:0.32.1 Channels:4020212223303800 Moniker:app48.fun-box.ru Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
+I[2019-07-16|18:26:21.440] Executed block                 module=state height=1 validTxs=0 invalidTxs=0
+I[2019-07-16|18:26:21.446] Committed state                module=state height=1 txs=0 appHash=
+```
+
+This should start the full node and connect to our ABCI application.
+
+```sh
+I[2019-07-16|18:25:11.525] Waiting for new connection...
+I[2019-07-16|18:26:20.329] Accepted a new connection
+I[2019-07-16|18:26:20.329] Waiting for new connection...
+I[2019-07-16|18:26:20.330] Accepted a new connection
+I[2019-07-16|18:26:20.330] Waiting for new connection...
+I[2019-07-16|18:26:20.330] Accepted a new connection
+```
+
+Now open another tab in your terminal and try sending a transaction:
+
+```sh
+$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "check_tx": {
+      "gasWanted": "1"
+    },
+    "deliver_tx": {},
+    "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
+    "height": "33"
+  }
+}
+```
+
+The response should contain the height at which this transaction was
+committed.
+
+Now let's check if the given key exists and retrieve its value:
+
+```sh
+$ curl -s 'localhost:26657/abci_query?data="tendermint"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "response": {
+      "log": "exists",
+      "key": "dGVuZGVybWludA==",
+      "value": "cm9ja3M="
+    }
+  }
+}
+```
+
+"dGVuZGVybWludA==" and "cm9ja3M=" are the base64 encodings of "tendermint"
+and "rocks" respectively.
+
+## Outro
+
+I hope everything went smoothly and your first, but hopefully not the last,
+Tendermint Core application is up and running. If not, please [open an issue on
+Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
+deeper, read [the docs](https://tendermint.com/docs/).
diff --git a/docs/guides/java.md b/docs/guides/java.md
new file mode 100644
index 000000000..162b40fd7
--- /dev/null
+++ b/docs/guides/java.md
@@ -0,0 +1,600 @@
+# Creating an application in Java
+
+## Guide Assumptions
+
+This guide is designed for beginners who want to get started with a Tendermint
+Core application from scratch. It does not assume that you have any prior
+experience with Tendermint Core.
+
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine (your application) - written in any programming language - and securely
+replicates it on many machines.
+
+By following along with this guide, you'll create a Tendermint Core project
+called kvstore, a (very) simple distributed BFT key-value store. The
+application (which implements the blockchain interface (ABCI)) will be
+written in Java.
+
+This guide assumes that you are not new to the JVM world. If you are new, please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html).
+
+## Built-in app vs external app
+
+If you use Go, you can run your app and Tendermint Core in the same process to get maximum performance.
+[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way.
+Please refer to the [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details.
+
+If you choose another language, as we do in this guide, you have to write a separate app,
+which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC.
+This guide will show you how to build an external application using a gRPC server.
+
+Having a separate application might give you better security guarantees, as the two
+processes would be communicating via an established binary protocol. Tendermint
+Core will not have access to the application's state.
+
+## 1.1 Installing Java and Gradle
+
+Please refer to [Oracle's guide for installing the JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html).
+
+Verify that you have installed Java successfully:
+
+```sh
+$ java -version
+java version "12.0.2" 2019-07-16
+Java(TM) SE Runtime Environment (build 12.0.2+10)
+Java HotSpot(TM) 64-Bit Server VM (build 12.0.2+10, mixed mode, sharing)
+```
+
+You can use any version of Java equal to or higher than 8.
+This guide is written using Java SE Development Kit 12.
+
+Make sure the `$JAVA_HOME` environment variable is set:
+
+```sh
+$ echo $JAVA_HOME
+/Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home
+```
+
+For Gradle installation, please refer to [their official guide](https://gradle.org/install/).
+
+## 1.2 Creating a new Java project
+
+We'll start by creating a new Gradle project.
+
+```sh
+$ export KVSTORE_HOME=~/kvstore
+$ mkdir $KVSTORE_HOME
+$ cd $KVSTORE_HOME
+```
+
+Inside the example directory run:
+```sh
+gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit
+```
+This will create a new project for you. The tree of files should look like:
+```sh
+$ tree
+.
+|-- build.gradle
+|-- gradle
+|   `-- wrapper
+|       |-- gradle-wrapper.jar
+|       `-- gradle-wrapper.properties
+|-- gradlew
+|-- gradlew.bat
+|-- settings.gradle
+`-- src
+    |-- main
+    |   |-- java
+    |   |   `-- io
+    |   |       `-- example
+    |   |           `-- App.java
+    |   `-- resources
+    `-- test
+        |-- java
+        |   `-- io
+        |       `-- example
+        |           `-- AppTest.java
+        `-- resources
+```
+
+When run, this should print "Hello world." to the standard output.
+
+```sh
+$ ./gradlew run
+> Task :run
+Hello world.
+```
+
+## 1.3 Writing a Tendermint Core application
+
+Tendermint Core communicates with the application through the Application
+BlockChain Interface (ABCI). All message types are defined in the [protobuf
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+This allows Tendermint Core to run applications written in any programming
+language.
+
+### 1.3.1 Compile .proto files
+
+Add the following piece to the top of the `build.gradle`:
+```groovy
+buildscript {
+    repositories {
+        mavenCentral()
+    }
+    dependencies {
+        classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8'
+    }
+}
+```
+
+Enable the protobuf plugin in the `plugins` section of the `build.gradle`:
+```groovy
+plugins {
+    id 'com.google.protobuf' version '0.8.8'
+}
+```
+
+Add the following code to `build.gradle`:
+```groovy
+protobuf {
+    protoc {
+        artifact = "com.google.protobuf:protoc:3.7.1"
+    }
+    plugins {
+        grpc {
+            artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1'
+        }
+    }
+    generateProtoTasks {
+        all()*.plugins {
+            grpc {}
+        }
+    }
+}
+```
+
+Now we should be ready to compile the `*.proto` files.
+
+Copy the necessary `.proto` files to your project:
+```sh
+mkdir -p \
+  $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \
+  $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \
+  $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common \
+  $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto
+
+cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto
+cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto
+cp $GOPATH/src/github.com/tendermint/tendermint/libs/common/types.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common/types.proto
+cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto
+```
+
+Add these dependencies to `build.gradle`:
+```groovy
+dependencies {
+    implementation 'io.grpc:grpc-protobuf:1.22.1'
+    implementation 'io.grpc:grpc-netty-shaded:1.22.1'
+    implementation 'io.grpc:grpc-stub:1.22.1'
+}
+```
+
+To generate all protobuf-type classes run:
+```sh
+./gradlew generateProto
+```
+To verify that everything went smoothly, you can inspect the `build/generated/` directory:
+```sh
+$ tree build/generated/
+build/generated/
+|-- source
+|   `-- proto
+|       `-- main
+|           |-- grpc
+|           |   `-- types
+|           |       `-- ABCIApplicationGrpc.java
+|           `-- java
+|               |-- com
+|               |   `-- google
+|               |       `-- protobuf
+|               |           `-- GoGoProtos.java
+|               |-- common
+|               |   `-- Types.java
+|               |-- merkle
+|               |   `-- Merkle.java
+|               `-- types
+|                   `-- Types.java
+```
+
+### 1.3.2 Implementing ABCI
+
+The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file
+contains the abstract class `ABCIApplicationImplBase`, which we'll need to extend.
+
+Create `$KVSTORE_HOME/src/main/java/io/example/KVStoreApp.java` file with the following content:
+```java
+package io.example;
+
+import io.grpc.stub.StreamObserver;
+import types.ABCIApplicationGrpc;
+import types.Types.*;
+
+class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase {
+
+    // methods implementation
+
+}
+```
+
+Now I will go through each method of `ABCIApplicationImplBase`, explaining when
+it's called and adding the required business logic.
+
+### 1.3.3 CheckTx
+
+When a new transaction is added to Tendermint Core, it will ask the
+application to check it (validate the format, signatures, etc.).
+
+```java
+@Override
+public void checkTx(RequestCheckTx req, StreamObserver<ResponseCheckTx> responseObserver) {
+    var tx = req.getTx();
+    int code = validate(tx);
+    var resp = ResponseCheckTx.newBuilder()
+            .setCode(code)
+            .setGasWanted(1)
+            .build();
+    responseObserver.onNext(resp);
+    responseObserver.onCompleted();
+}
+
+private int validate(ByteString tx) {
+    List<byte[]> parts = split(tx, '=');
+    if (parts.size() != 2) {
+        return 1;
+    }
+    byte[] key = parts.get(0);
+    byte[] value = parts.get(1);
+
+    // check if the same key=value already exists
+    var stored = getPersistedValue(key);
+    if (stored != null && Arrays.equals(stored, value)) {
+        return 2;
+    }
+
+    return 0;
+}
+
+private List<byte[]> split(ByteString tx, char separator) {
+    var arr = tx.toByteArray();
+    int i;
+    for (i = 0; i < tx.size(); i++) {
+        if (arr[i] == (byte)separator) {
+            break;
+        }
+    }
+    if (i == tx.size()) {
+        return Collections.emptyList();
+    }
+    return List.of(
+            tx.substring(0, i).toByteArray(),
+            tx.substring(i + 1).toByteArray()
+    );
+}
+```
+
+Don't worry if this does not compile yet.
+
+If the transaction does not have the form `{bytes}={bytes}`, we return code
+`1`. When the same key=value pair already exists (same key and value), we
+return code `2`. For all others, we return a zero code, indicating that they
+are valid.
+
+Note that anything with a non-zero code (`-1`, `100`, etc.) will be considered
+invalid by Tendermint Core.
+
+Valid transactions will eventually be committed, provided they are not too big
+and have enough gas. To learn more about gas, check out ["the
+specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
+
+For the underlying key-value store we'll use
+[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java.
+
+`build.gradle`:
+```groovy
+dependencies {
+    implementation 'org.jetbrains.xodus:xodus-environment:1.3.91'
+}
+```
+
+```java
+...
+import jetbrains.exodus.ArrayByteIterable;
+import jetbrains.exodus.ByteIterable;
+import jetbrains.exodus.env.Environment;
+import jetbrains.exodus.env.Store;
+import jetbrains.exodus.env.StoreConfig;
+import jetbrains.exodus.env.Transaction;
+
+class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase {
+    private Environment env;
+    private Transaction txn = null;
+    private Store store = null;
+
+    KVStoreApp(Environment env) {
+        this.env = env;
+    }
+
+    ...
+
+    private byte[] getPersistedValue(byte[] k) {
+        return env.computeInReadonlyTransaction(txn -> {
+            var store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn);
+            ByteIterable byteIterable = store.get(txn, new ArrayByteIterable(k));
+            if (byteIterable == null) {
+                return null;
+            }
+            return byteIterable.getBytesUnsafe();
+        });
+    }
+}
+```
+
+### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit
+
+When Tendermint Core has decided on the block, it's transferred to the
+application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
+`EndBlock` at the end. `DeliverTx` messages are transferred asynchronously,
+but the responses are expected to come in order.
+
+```java
+@Override
+public void beginBlock(RequestBeginBlock req, StreamObserver<ResponseBeginBlock> responseObserver) {
+    txn = env.beginTransaction();
+    store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn);
+    var resp = ResponseBeginBlock.newBuilder().build();
+    responseObserver.onNext(resp);
+    responseObserver.onCompleted();
+}
+```
+Here we begin a new transaction, which will accumulate the block's transactions, and open the corresponding store.
+
+```java
+@Override
+public void deliverTx(RequestDeliverTx req, StreamObserver<ResponseDeliverTx> responseObserver) {
+    var tx = req.getTx();
+    int code = validate(tx);
+    if (code == 0) {
+        List<byte[]> parts = split(tx, '=');
+        var key = new ArrayByteIterable(parts.get(0));
+        var value = new ArrayByteIterable(parts.get(1));
+        store.put(txn, key, value);
+    }
+    var resp = ResponseDeliverTx.newBuilder()
+            .setCode(code)
+            .build();
+    responseObserver.onNext(resp);
+    responseObserver.onCompleted();
+}
+```
+
+If the transaction is badly formatted or the same key=value pair already
+exists, we again return a non-zero code. Otherwise, we add it to the store.
+
+In the current design, a block can include incorrect transactions (those that
+passed `CheckTx` but failed `DeliverTx`, or transactions included by the proposer
+directly). This is done for performance reasons.
+
+Note we can't commit transactions inside `DeliverTx` because in that case
+`Query`, which may be called in parallel, would return inconsistent data (i.e.
+it would report that some value already exists even though the actual block
+has not yet been committed).
+
+`Commit` instructs the application to persist the new state.
+
+```java
+@Override
+public void commit(RequestCommit req, StreamObserver<ResponseCommit> responseObserver) {
+    txn.commit();
+    var resp = ResponseCommit.newBuilder()
+            .setData(ByteString.copyFrom(new byte[8]))
+            .build();
+    responseObserver.onNext(resp);
+    responseObserver.onCompleted();
+}
+```
+
+### 1.3.5 Query
+
+Now, when the client wants to know whether a particular key/value pair exists,
+it will call the Tendermint Core RPC `/abci_query` endpoint, which in turn
+will call the application's `Query` method.
+
+Applications are free to provide their own APIs. But by using Tendermint Core
+as a proxy, clients (including [light client
+package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
+the unified API across different applications. Plus they won't have to call the
+otherwise separate Tendermint Core API for additional proofs.
+
+Note we don't include a proof here.
+
+```java
+@Override
+public void query(RequestQuery req, StreamObserver<ResponseQuery> responseObserver) {
+    var k = req.getData().toByteArray();
+    var v = getPersistedValue(k);
+    var builder = ResponseQuery.newBuilder();
+    if (v == null) {
+        builder.setLog("does not exist");
+    } else {
+        builder.setLog("exists");
+        builder.setKey(ByteString.copyFrom(k));
+        builder.setValue(ByteString.copyFrom(v));
+    }
+    responseObserver.onNext(builder.build());
+    responseObserver.onCompleted();
+}
+```
+
+The complete specification can be found
+[here](https://tendermint.com/docs/spec/abci/).
+
+## 1.4 Starting an application and a Tendermint Core instance
+
+Put the following code into the `$KVSTORE_HOME/src/main/java/io/example/App.java` file:
+
+```java
+package io.example;
+
+import jetbrains.exodus.env.Environment;
+import jetbrains.exodus.env.Environments;
+
+import java.io.IOException;
+
+public class App {
+    public static void main(String[] args) throws IOException, InterruptedException {
+        try (Environment env = Environments.newInstance("tmp/storage")) {
+            var app = new KVStoreApp(env);
+            var server = new GrpcServer(app, 26658);
+            server.start();
+            server.blockUntilShutdown();
+        }
+    }
+}
+```
+
+This is the entry point of the application.
+Here we create a special object `Environment`, which knows where to store the application state.
+Then we create and start the gRPC server to handle Tendermint Core requests.
+
+Create the `$KVSTORE_HOME/src/main/java/io/example/GrpcServer.java` file with the following content:
+```java
+package io.example;
+
+import io.grpc.BindableService;
+import io.grpc.Server;
+import io.grpc.ServerBuilder;
+
+import java.io.IOException;
+
+class GrpcServer {
+    private Server server;
+
+    GrpcServer(BindableService service, int port) {
+        this.server = ServerBuilder.forPort(port)
+                .addService(service)
+                .build();
+    }
+
+    void start() throws IOException {
+        server.start();
+        System.out.println("gRPC server started, listening on " + port);
+        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+            System.out.println("shutting down gRPC server since JVM is shutting down");
+            GrpcServer.this.stop();
+            System.out.println("server shut down");
+        }));
+    }
+
+    private void stop() {
+        server.shutdown();
+    }
+
+    /**
+     * Await termination on the main thread since the grpc library uses daemon threads.
+     */
+    void blockUntilShutdown() throws InterruptedException {
+        server.awaitTermination();
+    }
+}
+```
+
+## 1.5 Getting Up and Running
+
+To create the default configuration, node key and private validator files,
+let's execute `tendermint init`. But before we do that, we will need to
+install Tendermint Core.
+
+```sh
+$ rm -rf /tmp/example
+$ cd $GOPATH/src/github.com/tendermint/tendermint
+$ make install
+$ TMHOME="/tmp/example" tendermint init
+
+I[2019-07-16|18:20:36.480] Generated private validator    module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example/data/priv_validator_state.json
+I[2019-07-16|18:20:36.481] Generated node key             module=main path=/tmp/example/config/node_key.json
+I[2019-07-16|18:20:36.482] Generated genesis file         module=main path=/tmp/example/config/genesis.json
+```
+
+Feel free to explore the generated files, which can be found in the
+`/tmp/example/config` directory. Documentation on the config can be found
+[here](https://tendermint.com/docs/tendermint-core/configuration.html).
+
+We are ready to start our application:
+
+```sh
+./gradlew run
+
+gRPC server started, listening on 26658
+```
+
+Then we need to start Tendermint Core and point it to our application. Staying
+within the application directory, execute:
+
+```sh
+$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658
+
+I[2019-07-28|15:44:53.632] Version info                   module=main software=0.32.1 block=10 p2p=7
+I[2019-07-28|15:44:53.677] Starting Node                  module=main impl=Node
+I[2019-07-28|15:44:53.681] Started node                   module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
+I[2019-07-28|15:44:54.801] Executed block                 module=state height=8 validTxs=0 invalidTxs=0
+I[2019-07-28|15:44:54.814] Committed state                module=state height=8 txs=0 appHash=0000000000000000
+```
+
+Now open another tab in your terminal and try sending a transaction:
+
+```sh
+$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "check_tx": {
+      "gasWanted": "1"
+    },
+    "deliver_tx": {},
+    "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
+    "height": "33"
+  }
+}
+```
+
+The response should contain the height at which this transaction was
+committed.
+
+Now let's check if the given key exists and retrieve its value:
+
+```sh
+$ curl -s 'localhost:26657/abci_query?data="tendermint"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "response": {
+      "log": "exists",
+      "key": "dGVuZGVybWludA==",
+      "value": "cm9ja3M="
+    }
+  }
+}
+```
+
+`dGVuZGVybWludA==` and `cm9ja3M=` are the base64 encodings of `tendermint` and `rocks` respectively.
+
+## Outro
+
+I hope everything went smoothly and your first, but hopefully not the last,
+Tendermint Core application is up and running. If not, please [open an issue on
+Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
+deeper, read [the docs](https://tendermint.com/docs/).
+
+The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java).
diff --git a/docs/guides/kotlin.md b/docs/guides/kotlin.md
new file mode 100644
index 000000000..fa9e10b35
--- /dev/null
+++ b/docs/guides/kotlin.md
@@ -0,0 +1,574 @@
+# Creating an application in Kotlin
+
+## Guide Assumptions
+
+This guide is designed for beginners who want to get started with a Tendermint
+Core application from scratch. It does not assume that you have any prior
+experience with Tendermint Core.
+
+Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
+transition machine (your application) - written in any programming language - and securely
+replicates it on many machines.
+
+By following along with this guide, you'll create a Tendermint Core project
+called kvstore, a (very) simple distributed BFT key-value store. The
+application (which implements the blockchain interface (ABCI)) will be
+written in Kotlin.
+
+This guide assumes that you are not new to the JVM world. If you are new, please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html).
+
+## Built-in app vs external app
+
+If you use Go, you can run your app and Tendermint Core in the same process to get maximum performance.
+[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way.
+Please refer to the [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details.
+
+If you choose another language, as we do in this guide, you have to write a separate app,
+which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC.
+This guide will show you how to build an external application using a gRPC server.
+
+Having a separate application might give you better security guarantees, as the two
+processes would be communicating via an established binary protocol. Tendermint
+Core will not have access to the application's state.
+
+## 1.1 Installing Java and Gradle
+
+Please refer to [Oracle's guide for installing the JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html).
+
+Verify that you have installed Java successfully:
+
+```sh
+$ java -version
+java version "1.8.0_162"
+Java(TM) SE Runtime Environment (build 1.8.0_162-b12)
+Java HotSpot(TM) 64-Bit Server VM (build 25.162-b12, mixed mode)
+```
+
+You can use any version of Java equal to or higher than 8.
+In my case it is Java SE Development Kit 8.
+
+Make sure the `$JAVA_HOME` environment variable is set:
+
+```sh
+$ echo $JAVA_HOME
+/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home
+```
+
+For Gradle installation, please refer to [their official guide](https://gradle.org/install/).
+
+## 1.2 Creating a new Kotlin project
+
+We'll start by creating a new Gradle project.
+
+```sh
+$ export KVSTORE_HOME=~/kvstore
+$ mkdir $KVSTORE_HOME
+$ cd $KVSTORE_HOME
+```
+
+Inside the example directory run:
+```sh
+gradle init --dsl groovy --package io.example --project-name example --type kotlin-application
+```
+This will create a new project for you. The tree of files should look like:
+```sh
+$ tree
+.
+|-- build.gradle
+|-- gradle
+|   `-- wrapper
+|       |-- gradle-wrapper.jar
+|       `-- gradle-wrapper.properties
+|-- gradlew
+|-- gradlew.bat
+|-- settings.gradle
+`-- src
+    |-- main
+    |   |-- kotlin
+    |   |   `-- io
+    |   |       `-- example
+    |   |           `-- App.kt
+    |   `-- resources
+    `-- test
+        |-- kotlin
+        |   `-- io
+        |       `-- example
+        |           `-- AppTest.kt
+        `-- resources
+```
+
+When run, this should print "Hello world." to the standard output.
+
+```sh
+$ ./gradlew run
+> Task :run
+Hello world.
+```
+
+## 1.3 Writing a Tendermint Core application
+
+Tendermint Core communicates with the application through the Application
+BlockChain Interface (ABCI). All message types are defined in the [protobuf
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+This allows Tendermint Core to run applications written in any programming
+language.
+
+### 1.3.1 Compile .proto files
+
+Add the following piece to the top of the `build.gradle`:
+```groovy
+buildscript {
+    repositories {
+        mavenCentral()
+    }
+    dependencies {
+        classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8'
+    }
+}
+```
+
+Enable the protobuf plugin in the `plugins` section of the `build.gradle`:
+```groovy
+plugins {
+    id 'com.google.protobuf' version '0.8.8'
+}
+```
+
+Add the following code to `build.gradle`:
+```groovy
+protobuf {
+    protoc {
+        artifact = "com.google.protobuf:protoc:3.7.1"
+    }
+    plugins {
+        grpc {
+            artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1'
+        }
+    }
+    generateProtoTasks {
+        all()*.plugins {
+            grpc {}
+        }
+    }
+}
+```
+
+Now we should be ready to compile the `*.proto` files.
+
+Copy the necessary `.proto` files to your project:
+```sh
+mkdir -p \
+  $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \
+  $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \
+  $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common \
+  $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto
+
+cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto
+cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto
+cp $GOPATH/src/github.com/tendermint/tendermint/libs/common/types.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common/types.proto
+cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \
+   $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto
+```
+
+Add these dependencies to `build.gradle`:
+```groovy
+dependencies {
+    implementation 'io.grpc:grpc-protobuf:1.22.1'
+    implementation 'io.grpc:grpc-netty-shaded:1.22.1'
+    implementation 'io.grpc:grpc-stub:1.22.1'
+}
+```
+
+To generate all protobuf-type classes run:
+```sh
+./gradlew generateProto
+```
+To verify that everything went smoothly, you can inspect the `build/generated/` directory:
+```sh
+$ tree build/generated/
+build/generated/
+`-- source
+    `-- proto
+        `-- main
+            |-- grpc
+            |   `-- types
+            |       `-- ABCIApplicationGrpc.java
+            `-- java
+                |-- com
+                |   `-- google
+                |       `-- protobuf
+                |           `-- GoGoProtos.java
+                |-- common
+                |   `-- Types.java
+                |-- merkle
+                |   `-- Merkle.java
+                `-- types
+                    `-- Types.java
+```
+
+### 1.3.2 Implementing ABCI
+
+The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file
+contains the abstract class `ABCIApplicationImplBase`, which we'll need to extend.
+
+Create `$KVSTORE_HOME/src/main/kotlin/io/example/KVStoreApp.kt` file with the following content:
+```kotlin
+package io.example
+
+import io.grpc.stub.StreamObserver
+import types.ABCIApplicationGrpc
+import types.Types.*
+
+class KVStoreApp : ABCIApplicationGrpc.ABCIApplicationImplBase() {
+
+    // methods implementation
+
+}
+```
+
+Now I will go through each method of `ABCIApplicationImplBase`, explaining when
+it's called and adding the required business logic.
+
+### 1.3.3 CheckTx
+
+When a new transaction is added to Tendermint Core, it will ask the
+application to check it (validate the format, signatures, etc.).
+
+```kotlin
+override fun checkTx(req: RequestCheckTx, responseObserver: StreamObserver<ResponseCheckTx>) {
+    val code = req.tx.validate()
+    val resp = ResponseCheckTx.newBuilder()
+        .setCode(code)
+        .setGasWanted(1)
+        .build()
+    responseObserver.onNext(resp)
+    responseObserver.onCompleted()
+}
+
+private fun ByteString.validate(): Int {
+    val parts = this.split('=')
+    if (parts.size != 2) {
+        return 1
+    }
+    val key = parts[0]
+    val value = parts[1]
+
+    // check if the same key=value already exists
+    val stored = getPersistedValue(key)
+    if (stored != null && stored.contentEquals(value)) {
+        return 2
+    }
+
+    return 0
+}
+
+private fun ByteString.split(separator: Char): List<ByteArray> {
+    val arr = this.toByteArray()
+    val i = (0 until this.size()).firstOrNull { arr[it] == separator.toByte() }
+        ?: return emptyList()
+    return listOf(
+        this.substring(0, i).toByteArray(),
+        this.substring(i + 1).toByteArray()
+    )
+}
+```
+
+Don't worry if this does not compile yet.
+
+If the transaction does not have the form `{bytes}={bytes}`, we return code
+`1`. When the same key=value pair already exists (same key and value), we
+return code `2`. For all others, we return a zero code, indicating that they
+are valid.
+
+Note that anything with a non-zero code (`-1`, `100`, etc.) will be considered
+invalid by Tendermint Core.
+
+Valid transactions will eventually be committed, provided they are not too big
+and have enough gas. To learn more about gas, check out ["the
+specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
+
+For the underlying key-value store we'll use
+[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java.
+
+`build.gradle`:
+```groovy
+dependencies {
+    implementation 'org.jetbrains.xodus:xodus-environment:1.3.91'
+}
+```
+
+```kotlin
+...
+import jetbrains.exodus.ArrayByteIterable
+import jetbrains.exodus.env.Environment
+import jetbrains.exodus.env.Store
+import jetbrains.exodus.env.StoreConfig
+import jetbrains.exodus.env.Transaction
+
+class KVStoreApp(
+    private val env: Environment
+) : ABCIApplicationGrpc.ABCIApplicationImplBase() {
+
+    private var txn: Transaction? = null
+    private var store: Store? = null
+
+    ...
+
+    private fun getPersistedValue(k: ByteArray): ByteArray? {
+        return env.computeInReadonlyTransaction { txn ->
+            val store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn)
+            store.get(txn, ArrayByteIterable(k))?.bytesUnsafe
+        }
+    }
+}
+```
+
+### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit
+
+When Tendermint Core has decided on the block, it's transferred to the
+application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
+`EndBlock` at the end. `DeliverTx` messages are transferred asynchronously,
+but the responses are expected to come in order.
+
+```kotlin
+override fun beginBlock(req: RequestBeginBlock, responseObserver: StreamObserver<ResponseBeginBlock>) {
+    txn = env.beginTransaction()
+    store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn!!)
+    val resp = ResponseBeginBlock.newBuilder().build()
+    responseObserver.onNext(resp)
+    responseObserver.onCompleted()
+}
+```
+Here we begin a new transaction, which will accumulate the block's transactions, and open the corresponding store.
+
+```kotlin
+override fun deliverTx(req: RequestDeliverTx, responseObserver: StreamObserver<ResponseDeliverTx>) {
+    val code = req.tx.validate()
+    if (code == 0) {
+        val parts = req.tx.split('=')
+        val key = ArrayByteIterable(parts[0])
+        val value = ArrayByteIterable(parts[1])
+        store!!.put(txn!!, key, value)
+    }
+    val resp = ResponseDeliverTx.newBuilder()
+        .setCode(code)
+        .build()
+    responseObserver.onNext(resp)
+    responseObserver.onCompleted()
+}
+```
+
+If the transaction is badly formatted or the same key=value pair already
+exists, we again return a non-zero code. Otherwise, we add it to the store.
+
+In the current design, a block can include incorrect transactions (those that
+passed `CheckTx` but failed `DeliverTx`, or transactions included by the proposer
+directly). This is done for performance reasons.
+
+Note we can't commit transactions inside `DeliverTx` because in that case
+`Query`, which may be called in parallel, would return inconsistent data (i.e.
+it would report that some value already exists even though the actual block
+has not yet been committed).
+
+`Commit` instructs the application to persist the new state.
+
+```kotlin
+override fun commit(req: RequestCommit, responseObserver: StreamObserver<ResponseCommit>) {
+    txn!!.commit()
+    val resp = ResponseCommit.newBuilder()
+        .setData(ByteString.copyFrom(ByteArray(8)))
+        .build()
+    responseObserver.onNext(resp)
+    responseObserver.onCompleted()
+}
+```
+
+### 1.3.5 Query
+
+Now, when the client wants to know whether a particular key/value pair exists,
+it will call the Tendermint Core RPC `/abci_query` endpoint, which in turn
+will call the application's `Query` method.
+
+Applications are free to provide their own APIs. But by using Tendermint Core
+as a proxy, clients (including [light client
+package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
+the unified API across different applications. Plus they won't have to call the
+otherwise separate Tendermint Core API for additional proofs.
+
+Note we don't include a proof here.
+
+```kotlin
+override fun query(req: RequestQuery, responseObserver: StreamObserver<ResponseQuery>) {
+    val k = req.data.toByteArray()
+    val v = getPersistedValue(k)
+    val builder = ResponseQuery.newBuilder()
+    if (v == null) {
+        builder.log = "does not exist"
+    } else {
+        builder.log = "exists"
+        builder.key = ByteString.copyFrom(k)
+        builder.value = ByteString.copyFrom(v)
+    }
+    responseObserver.onNext(builder.build())
+    responseObserver.onCompleted()
+}
+```
+
+The complete specification can be found
+[here](https://tendermint.com/docs/spec/abci/).
+
+## 1.4 Starting an application and a Tendermint Core instance
+
+Put the following code into the `$KVSTORE_HOME/src/main/kotlin/io/example/App.kt` file:
+
+```kotlin
+package io.example
+
+import jetbrains.exodus.env.Environments
+
+fun main() {
+    Environments.newInstance("tmp/storage").use { env ->
+        val app = KVStoreApp(env)
+        val server = GrpcServer(app, 26658)
+        server.start()
+        server.blockUntilShutdown()
+    }
+}
+```
+
+This is the entry point of the application.
+Here we create a special object `Environment`, which knows where to store the application state.
+Then we create and start the gRPC server to handle Tendermint Core requests.
+
+Create a `$KVSTORE_HOME/src/main/kotlin/io/example/GrpcServer.kt` file with the following content:
+```kotlin
+package io.example
+
+import io.grpc.BindableService
+import io.grpc.ServerBuilder
+
+class GrpcServer(
+    private val service: BindableService,
+    private val port: Int
+) {
+    private val server = ServerBuilder
+        .forPort(port)
+        .addService(service)
+        .build()
+
+    fun start() {
+        server.start()
+        println("gRPC server started, listening on $port")
+        Runtime.getRuntime().addShutdownHook(object : Thread() {
+            override fun run() {
+                println("shutting down gRPC server since JVM is shutting down")
+                this@GrpcServer.stop()
+                println("server shut down")
+            }
+        })
+    }
+
+    fun stop() {
+        server.shutdown()
+    }
+
+    /**
+     * Await termination on the main thread since the grpc library uses daemon threads.
+     */
+    fun blockUntilShutdown() {
+        server.awaitTermination()
+    }
+
+}
+```
+
+## 1.5 Getting Up and Running
+
+To create a default configuration, nodeKey and private validator files, let's
+execute `tendermint init`. But before we do that, we will need to install
+Tendermint Core.
+
+```sh
+$ rm -rf /tmp/example
+$ cd $GOPATH/src/github.com/tendermint/tendermint
+$ make install
+$ TMHOME="/tmp/example" tendermint init
+
+I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
+I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
+I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
+```
+
+Feel free to explore the generated files, which can be found in the
+`/tmp/example/config` directory. Documentation on the config can be found
+[here](https://tendermint.com/docs/tendermint-core/configuration.html).
+
+We are ready to start our application:
+
+```sh
+./gradlew run
+
+gRPC server started, listening on 26658
+```
+
+Then we need to start Tendermint Core and point it to our application. Staying
+within the application directory, execute:
+
+```sh
+$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658
+
+I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7
+I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node
+I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
+I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0
+I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000
+```
+
+Now open another tab in your terminal and try sending a transaction:
+
+```sh
+$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "check_tx": {
+      "gasWanted": "1"
+    },
+    "deliver_tx": {},
+    "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
+    "height": "33"
+  }
+}
+```
+
+The response should contain the height at which this transaction was committed.
+
+Now let's check if the given key exists and what its value is:
+
+```sh
+$ curl -s 'localhost:26657/abci_query?data="tendermint"'
+{
+  "jsonrpc": "2.0",
+  "id": "",
+  "result": {
+    "response": {
+      "log": "exists",
+      "key": "dGVuZGVybWludA==",
+      "value": "cm9ja3My"
+    }
+  }
+}
+```
+
+`dGVuZGVybWludA==` and `cm9ja3M=` are the base64 encodings of the ASCII strings `tendermint` and `rocks`, respectively.
+
+## Outro
+
+I hope everything went smoothly and your first, but hopefully not your last,
+Tendermint Core application is up and running. If not, please [open an issue on
+Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
+deeper, read [the docs](https://tendermint.com/docs/).
+
+The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-kotlin).
diff --git a/docs/introduction/install.md b/docs/introduction/install.md
index 3005a7349..0a013bed1 100644
--- a/docs/introduction/install.md
+++ b/docs/introduction/install.md
@@ -1,9 +1,9 @@
# Install Tendermint

The fastest and easiest way to install the `tendermint` binary
-is to run [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_ubuntu.sh) on
+is to run [this script](https://github.com/tendermint/tendermint/blob/master/scripts/install/install_tendermint_ubuntu.sh) on
a fresh Ubuntu instance,
-or [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh)
+or [this script](https://github.com/tendermint/tendermint/blob/master/scripts/install/install_tendermint_bsd.sh)
on a fresh FreeBSD instance. Read the comments / instructions carefully (i.e.,
reset your terminal after running the script, make sure you are okay with the
network connections being made).
@@ -29,7 +29,6 @@ cd tendermint

```
make get_tools
-make get_vendor_deps
```

### Compile

@@ -46,6 +45,8 @@ make build

to put the binary in `./build`.

+_DISCLAIMER_ The tendermint binary is built/installed without the DWARF symbol table. If you would like to build/install tendermint with DWARF symbols and debug information, remove `-s -w` from `BUILD_FLAGS` in the Makefile.
+
The latest `tendermint version` is now installed.

## Run

@@ -71,7 +72,6 @@ To upgrade, run

```
cd $GOPATH/src/github.com/tendermint/tendermint
git pull origin master
-make get_vendor_deps
make install
```

@@ -79,9 +79,7 @@ make install

Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7).

-### Ubuntu
-
-Install LevelDB with snappy (optionally):
+Install LevelDB with snappy (optional). Below are the commands for Ubuntu:

```
sudo apt-get update
@@ -100,23 +98,23 @@ wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \
  rm -f v1.20.tar.gz
```

-Set database backend to cleveldb:
+Set the database backend to `cleveldb`:

```
# config/config.toml
db_backend = "cleveldb"
```

-To install Tendermint, run
+To install Tendermint, run:

```
CGO_LDFLAGS="-lsnappy" make install_c
```

-or run
+or run:

```
CGO_LDFLAGS="-lsnappy" make build_c
```

-to put the binary in `./build`.
+which puts the binary in `./build`.
diff --git a/docs/introduction/introduction.md b/docs/introduction/introduction.md
index f80a159ca..4f435bbf5 100644
--- a/docs/introduction/introduction.md
+++ b/docs/introduction/introduction.md
@@ -122,7 +122,7 @@ consensus engine, and provides a particular application state.
## ABCI Overview

The [Application BlockChain Interface
-(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
+(ABCI)](https://github.com/tendermint/tendermint/tree/master/abci)
allows for Byzantine Fault Tolerant replication of applications
written in any programming language.

@@ -190,7 +190,7 @@ core to the application. The application replies with corresponding
response messages. The messages are specified here: [ABCI Message
-Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
+Types](https://github.com/tendermint/tendermint/blob/master/abci/README.md#message-types).

The **DeliverTx** message is the work horse of the application. Each
transaction in the blockchain is delivered with this message. The
diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md
index a35dd9ec1..0371afc63 100644
--- a/docs/introduction/what-is-tendermint.md
+++ b/docs/introduction/what-is-tendermint.md
@@ -116,7 +116,7 @@ consensus engine, and provides a particular application state.

## ABCI Overview

The [Application BlockChain Interface
-(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
+(ABCI)](https://github.com/tendermint/tendermint/tree/master/abci)
allows for Byzantine Fault Tolerant replication of applications
written in any programming language.

@@ -184,7 +184,7 @@ core to the application. The application replies with corresponding
response messages. The messages are specified here: [ABCI Message
-Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
+Types](https://github.com/tendermint/tendermint/blob/master/abci/README.md#message-types).

The **DeliverTx** message is the work horse of the application. Each
transaction in the blockchain is delivered with this message. The
diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md
index 8db49af5e..37b53fafe 100644
--- a/docs/networks/docker-compose.md
+++ b/docs/networks/docker-compose.md
@@ -78,9 +78,9 @@ cd $GOPATH/src/github.com/tendermint/tendermint
rm -rf ./build/node*
```

-## Configuring abci containers 
+## Configuring abci containers

-To use your own abci applications with 4-node setup edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/develop/docker-compose.yml) file and add image to your abci application.
+To use your own ABCI applications with the 4-node setup, edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/master/docker-compose.yml) file and add the image of your ABCI application.

```
  abci0:
@@ -129,7 +129,7 @@ To use your own abci applications with 4-node setup edit the [docker-compose.yam
```

-Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to it's abci.
+Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to its ABCI.

```
node0:
diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md
index c08ade17a..3ef6056a0 100644
--- a/docs/networks/terraform-and-ansible.md
+++ b/docs/networks/terraform-and-ansible.md
@@ -8,7 +8,7 @@ testnets on those servers.
## Install NOTE: see the [integration bash -script](https://github.com/tendermint/tendermint/blob/develop/networks/remote/integration.sh) +script](https://github.com/tendermint/tendermint/blob/master/networks/remote/integration.sh) that can be run on a fresh DO droplet and will automatically spin up a 4 node testnet. The script more or less does everything described below. @@ -62,16 +62,18 @@ There are several roles that are self-explanatory: First, we configure our droplets by specifying the paths for tendermint (`BINARY`) and the node files (`CONFIGDIR`). The latter expects any number of directories named `node0, node1, ...` and so on (equal to the -number of droplets created). For this example, we use pre-created files -from [this -directory](https://github.com/tendermint/tendermint/tree/master/docs/examples). -To create your own files, use either the `tendermint testnet` command or -review [manual deployments](./deploy-testnets.md). +number of droplets created). -Here's the command to run: +To create the node files run: ``` -ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples +tendermint testnet +``` + +Then, to configure our droplets run: + +``` +ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet ``` Voila! All your droplets now have the `tendermint` binary and required diff --git a/docs/spec/abci/README.md b/docs/spec/abci/README.md index bb1c38b6e..56d5e8aaf 100644 --- a/docs/spec/abci/README.md +++ b/docs/spec/abci/README.md @@ -2,11 +2,11 @@ ABCI is the interface between Tendermint (a state-machine replication engine) and your application (the actual state machine). It consists of a set of -*methods*, where each method has a corresponding `Request` and `Response` +_methods_, where each method has a corresponding `Request` and `Response` message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*` messages and receiving the `Response*` messages in return. -All message types are defined in a [protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). +All message types are defined in a [protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto). This allows Tendermint to run applications written in any programming language. This specification is split as follows: diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 4b05ba692..dd6bdbc3f 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -3,9 +3,9 @@ ## Overview The ABCI message types are defined in a [protobuf -file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). +file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto). -ABCI methods are split across 3 separate ABCI *connections*: +ABCI methods are split across 3 separate ABCI _connections_: - `Consensus Connection`: `InitChain, BeginBlock, DeliverTx, EndBlock, Commit` - `Mempool Connection`: `CheckTx` @@ -38,20 +38,58 @@ Finally, `Query`, `CheckTx`, and `DeliverTx` include a `Codespace string`, whose intended use is to disambiguate `Code` values returned by different domains of the application. The `Codespace` is a namespace for the `Code`. 
-## Tags
+## Events

Some methods (`CheckTx, BeginBlock, DeliverTx, EndBlock`)
-include a `Tags` field in their `Response*`. Each tag is key-value pair denoting
-something about what happened during the methods execution.
+include an `Events` field in their `Response*`. Each event contains a type and a
+list of attributes, which are key-value pairs denoting something about what happened
+during the method's execution.

-Tags can be used to index transactions and blocks according to what happened
-during their execution. Note that the set of tags returned for a block from
+Events can be used to index transactions and blocks according to what happened
+during their execution. Note that the set of events returned for a block from
`BeginBlock` and `EndBlock` are merged. In case both methods return the same
tag, only the value defined in `EndBlock` is used.

-Keys and values in tags must be UTF-8 encoded strings (e.g.
-"account.owner": "Bob", "balance": "100.0",
-"time": "2018-01-02T12:30:00Z")
+Each event has a `type` which is meant to categorize the event for a particular
+`Response*` or tx. A `Response*` or tx may contain multiple events with duplicate
+`type` values, where each distinct entry is meant to categorize attributes for a
+particular event. The event type itself, as well as every key and value in an
+event's attributes, must be UTF-8 encoded strings.
+
+Example:
+
+```go
+abci.ResponseDeliverTx{
+	// ...
+	Events: []abci.Event{
+		{
+			Type: "validator.provisions",
+			Attributes: cmn.KVPairs{
+				cmn.KVPair{Key: []byte("address"), Value: []byte("...")},
+				cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
+				cmn.KVPair{Key: []byte("balance"), Value: []byte("...")},
+			},
+		},
+		{
+			Type: "validator.provisions",
+			Attributes: cmn.KVPairs{
+				cmn.KVPair{Key: []byte("address"), Value: []byte("...")},
+				cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
+				cmn.KVPair{Key: []byte("balance"), Value: []byte("...")},
+			},
+		},
+		{
+			Type: "validator.slashed",
+			Attributes: cmn.KVPairs{
+				cmn.KVPair{Key: []byte("address"), Value: []byte("...")},
+				cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
+				cmn.KVPair{Key: []byte("reason"), Value: []byte("...")},
+			},
+		},
+		// ...
+	},
+}
+```

## Determinism

@@ -77,19 +115,19 @@ non-determinism must be fixed and the nodes restarted.

Sources of non-determinism in applications may include:

- Hardware failures
-    - Cosmic rays, overheating, etc.
+  - Cosmic rays, overheating, etc.
- Node-dependent state
-    - Random numbers
-    - Time
+  - Random numbers
+  - Time
- Underspecification
-    - Library version changes
-    - Race conditions
-    - Floating point numbers
-    - JSON serialization
-    - Iterating through hash-tables/maps/dictionaries
+  - Library version changes
+  - Race conditions
+  - Floating point numbers
+  - JSON serialization
+  - Iterating through hash-tables/maps/dictionaries
- External Sources
-    - Filesystem
-    - Network calls (eg. some external REST API service)
+  - Filesystem
+  - Network calls (eg. some external REST API service)

See [#56](https://github.com/tendermint/abci/issues/56) for original discussion.

@@ -103,7 +141,7 @@ on them. All other fields in the `Response*` must be strictly deterministic.

## Block Execution

The first time a new blockchain is started, Tendermint calls
-`InitChain`. From then on, the follow sequence of methods is executed for each
+`InitChain`. From then on, the following sequence of methods is executed for each
+block:

`BeginBlock, [DeliverTx], EndBlock, Commit`

@@ -202,9 +240,9 @@ Commit are included in the header of the next block.

- `Path (string)`: Path of request, like an HTTP GET path. Can be
used with or in lieu of Data.
- Apps MUST interpret '/store' as a query by key on the
-    underlying store. The key SHOULD be specified in the Data field.
+  underlying store. The key SHOULD be specified in the Data field.
- Apps SHOULD allow queries over specific types like
-    '/accounts/...' or '/votes/...'
+  '/accounts/...' or '/votes/...'
- `Height (int64)`: The block height for which you want the query
(default=0 returns data for the latest committed block). Note
that this is the height of the block containing the
@@ -231,7 +269,7 @@ Commit are included in the header of the next block.
- Query for data from the application at current or past height.
- Optionally return Merkle proof.
- Merkle proof includes self-describing `type` field to support many types
-    of Merkle trees and encoding formats.
+  of Merkle trees and encoding formats.

### BeginBlock

@@ -258,6 +296,10 @@ Commit are included in the header of the next block.

- **Request**:
- `Tx ([]byte)`: The request transaction bytes
+  - `Type (CheckTxType)`: What type of `CheckTx` request is this? At present,
+    there are two possible values: `CheckTxType_New` (the default, which says
+    that a full check is required), and `CheckTxType_Recheck` (when the mempool is
+    initiating a normal recheck of a transaction).
- **Response**:
- `Code (uint32)`: Response code
- `Data ([]byte)`: Result bytes, if any.
@@ -442,7 +484,7 @@ Commit are included in the header of the next block.
- `Votes ([]VoteInfo)`: List of validators addresses in the last validator set
with their voting power and whether or not they signed a vote.

-### ConsensusParams 
+### ConsensusParams

- **Fields**:
- `BlockSize (BlockSizeParams)`: Parameters limiting the size of a block.
@@ -456,17 +498,17 @@ Commit are included in the header of the next block.
- `MaxBytes (int64)`: Max size of a block, in bytes.
- `MaxGas (int64)`: Max sum of `GasWanted` in a proposed block.
- NOTE: blocks that violate this may be committed if there are Byzantine proposers.
-    It's the application's responsibility to handle this when processing a
-    block!
+  It's the application's responsibility to handle this when processing a
+  block!

### EvidenceParams

- **Fields**:
- `MaxAge (int64)`: Max age of evidence, in blocks. Evidence older than this
is considered stale and ignored.
-    - This should correspond with an app's "unbonding period" or other
-      similar mechanism for handling Nothing-At-Stake attacks.
-    - NOTE: this should change to time (instead of blocks)!
+  - This should correspond with an app's "unbonding period" or other
+    similar mechanism for handling Nothing-At-Stake attacks.
+  - NOTE: this should change to time (instead of blocks)!

### ValidatorParams

@@ -488,4 +530,3 @@ Commit are included in the header of the next block.
- `Type (string)`: Type of Merkle proof and how it's encoded.
- `Key ([]byte)`: Key in the Merkle tree that this proof is for.
- `Data ([]byte)`: Encoded Merkle proof for the key.
-
diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md
index 0c404c41e..4fe0389e4 100644
--- a/docs/spec/abci/apps.md
+++ b/docs/spec/abci/apps.md
@@ -65,7 +65,10 @@ begin. After `Commit`, CheckTx is run again on all transactions that remain in
the node's local mempool after filtering those included in the block.
To prevent the mempool from rechecking all transactions every time a block is committed, set
-the configuration option `mempool.recheck=false`.
+the configuration option `mempool.recheck=false`. As of Tendermint v0.32.1,
+an additional `Type` parameter is made available to the CheckTx function that
+indicates whether an incoming transaction is new (`CheckTxType_New`), or a
+recheck (`CheckTxType_Recheck`).

Finally, the mempool will unlock and new transactions can be processed through CheckTx again.

@@ -208,7 +211,7 @@ message PubKey {

The `pub_key` currently supports only one type:

-- `type = "ed25519" and`data = <raw 32-byte ed25519 pubkey>`
+- `type = "ed25519"` and `data = <raw 32-byte ed25519 pubkey>`

The `power` is the new voting power for the validator, with the
following rules:
@@ -258,7 +261,7 @@ This is enforced by Tendermint consensus.
If a block includes evidence older than this,
the block will be rejected (validators won't vote
for it).

-Must have `0 < MaxAge`.
+Must have `MaxAge > 0`.

### Updates

diff --git a/docs/spec/abci/client-server.md b/docs/spec/abci/client-server.md
index 5ac7b3eb4..94485f0d9 100644
--- a/docs/spec/abci/client-server.md
+++ b/docs/spec/abci/client-server.md
@@ -9,7 +9,7 @@ Applications](./apps.md).

## Message Protocol

The message protocol consists of pairs of requests and responses defined in the
-[protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+[protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto).

Some messages have no fields, while others may include byte-arrays, strings, integers,
or custom protobuf types.

@@ -33,9 +33,9 @@ The latter two can be tested using the `abci-cli` by setting the `--abci` flag
appropriately (ie. to `socket` or `grpc`).

See examples, in various stages of maintenance, in
-[Go](https://github.com/tendermint/tendermint/tree/develop/abci/server),
+[Go](https://github.com/tendermint/tendermint/tree/master/abci/server),
[JavaScript](https://github.com/tendermint/js-abci),
-[Python](https://github.com/tendermint/tendermint/tree/develop/abci/example/python3/abci),
+[Python](https://github.com/tendermint/tendermint/tree/master/abci/example/python3/abci),
[C++](https://github.com/mdyring/cpp-tmsp), and
[Java](https://github.com/jTendermint/jabci).

@@ -44,14 +44,13 @@ See examples, in various stages of maintenance, in

The simplest implementation uses function calls within Golang. This
means ABCI applications written in Golang can be compiled with
TendermintCore and run as a single binary.
-
### GRPC

If GRPC is available in your language, this is the easiest approach,
though it will have significant performance overhead.

To get started with GRPC, copy in the [protobuf
-file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto)
+file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
and compile it using the GRPC plugin for your language. For instance,
for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`. See the
[grpc documentation for more details](http://www.grpc.io/docs/).

@@ -107,4 +106,4 @@ received or a block is committed.

It is unlikely that you will need to implement a client. For details of
our client, see
-[here](https://github.com/tendermint/tendermint/tree/develop/abci/client).
+[here](https://github.com/tendermint/tendermint/tree/master/abci/client).
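+
+To make the in-process Golang option above concrete, here is a minimal sketch
+of an ABCI application that embeds `types.BaseApplication` and also illustrates
+the `Type` field of `CheckTx` described earlier. The import paths and the
+`server.NewServer` signature reflect the abci package layout at the time of
+writing; treat them as assumptions to verify against your Tendermint version.
+
+```go
+package main
+
+import (
+	"github.com/tendermint/tendermint/abci/server"
+	"github.com/tendermint/tendermint/abci/types"
+)
+
+// KVApp accepts every transaction, but skips the full validation
+// when the mempool is only rechecking an already-seen transaction.
+type KVApp struct {
+	types.BaseApplication
+}
+
+func (app *KVApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
+	if req.Type == types.CheckTxType_Recheck {
+		// The tx passed a full check earlier; a cheaper check may suffice.
+		return types.ResponseCheckTx{Code: 0}
+	}
+	// Full validation of a new transaction would go here.
+	return types.ResponseCheckTx{Code: 0, GasWanted: 1}
+}
+
+func main() {
+	// "socket" can be swapped for "grpc" to use the GRPC transport.
+	srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", &KVApp{})
+	if err != nil {
+		panic(err)
+	}
+	if err := srv.Start(); err != nil {
+		panic(err)
+	}
+	select {} // serve until the process is killed
+}
+```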
diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index bde580a14..170e91605 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -59,20 +59,20 @@ familiar with amino encoding. You can simply use below table and concatenate Prefix || Length (of raw bytes) || raw bytes ( while || stands for byte concatenation here). -| Type | Name | Prefix | Length | Notes | -| ------------------ | ----------------------------- | ---------- | -------- | ----- | -| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | -| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | -| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | -| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | -| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | | +| Type | Name | Prefix | Length | Notes | +| ----------------------- | ---------------------------------- | ---------- | -------- | ----- | +| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | +| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | +| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | +| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | +| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | | ### Example For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey - `020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` - would be encoded as - `EB5AE98721020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` +`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` +would be encoded as +`EB5AE98721020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` ### Key Types @@ -170,11 +170,11 @@ We use the RFC 6962 specification of a merkle tree, with sha256 as the hash func Merkle trees are used throughout Tendermint to compute a cryptographic digest of a data structure. The differences between RFC 6962 and the simplest form a merkle tree are that: -1) leaf nodes and inner nodes have different hashes. +1. leaf nodes and inner nodes have different hashes. This is for "second pre-image resistance", to prevent the proof to an inner node being valid as the proof of a leaf. The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`. -2) When the number of items isn't a power of two, the left half of the tree is as big as it could be. +2. When the number of items isn't a power of two, the left half of the tree is as big as it could be. (The largest power of two less than the number of items) This allows new leaves to be added with less recomputation. For example: @@ -218,7 +218,7 @@ func MerkleRoot(items [][]byte) []byte{ case 0: return nil case 1: - return leafHash(leafs[0]) + return leafHash(items[0]) default: k := getSplitPoint(len(items)) left := MerkleRoot(items[:k]) @@ -290,7 +290,7 @@ func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byt ### IAVL+ Tree -Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. 
For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/sdk/core/multistore.md)
+Because Tendermint only uses a Simple Merkle Tree, application developers are expected to use their own Merkle tree in their applications. For example, the IAVL+ Tree, an immutable self-balancing binary tree for persisting application state, is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/master/docs/clients/lite/specification.md)

## JSON

diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md
index 7df096bc9..0a91c857e 100644
--- a/docs/spec/blockchain/state.md
+++ b/docs/spec/blockchain/state.md
@@ -59,7 +59,7 @@ type Validator struct {
When hashing the Validator struct, the address is not included,
because it is redundant with the pubkey.

-The `state.Validators`, `state.LastValidators`, and `state.NextValidators`, must always by sorted by validator address,
+The `state.Validators`, `state.LastValidators`, and `state.NextValidators`, must always be sorted by validator address,
so that there is a canonical order for computing the MerkleRoot.

We also define a `TotalVotingPower` function, to return the total voting power:

diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md
index acd07397a..7b424dc6b 100644
--- a/docs/spec/consensus/consensus.md
+++ b/docs/spec/consensus/consensus.md
@@ -73,11 +73,11 @@ parameters over each successive round.
|(When +2/3 Precommits for block found)                              |
v                                                                    |
+--------------------------------------------------------------------+
-  |  Commit                                                            |
-  |                                                                    |
-  |  * Set CommitTime = now;                                           |
-  |  * Wait for block, then stage/save/commit block;                   |
-  +--------------------------------------------------------------------+
+|  Commit                                                            |
+|                                                                    |
+|  * Set CommitTime = now;                                           |
+|  * Wait for block, then stage/save/commit block;                   |
++--------------------------------------------------------------------+
```

# Background Gossip

@@ -120,7 +120,7 @@ A proposal is signed and published by the designated proposer at each
round. The proposer is chosen by a deterministic and non-choking round
robin selection algorithm that selects proposers in proportion to
their voting power (see
-[implementation](https://github.com/tendermint/tendermint/blob/develop/types/validator_set.go)).
+[implementation](https://github.com/tendermint/tendermint/blob/master/types/validator_set.go)).

A proposal at `(H,R)` is composed of a block and an optional latest
`PoLC-Round < R` which is included iff the proposer knows of one. This
@@ -131,13 +131,15 @@ liveness property.

### Propose Step (height:H,round:R)

-Upon entering `Propose`: - The designated proposer proposes a block at
-`(H,R)`.
+Upon entering `Propose`:
+- The designated proposer proposes a block at `(H,R)`.

-The `Propose` step ends: - After `timeoutProposeR` after entering
-`Propose`. --> goto `Prevote(H,R)` - After receiving proposal block
-and all prevotes at `PoLC-Round`. --> goto `Prevote(H,R)` - After
-[common exit conditions](#common-exit-conditions)
+The `Propose` step ends:
+- After `timeoutProposeR` after entering `Propose`. --> goto
+  `Prevote(H,R)`
+- After receiving proposal block and all prevotes at `PoLC-Round`. -->
+  goto `Prevote(H,R)`
+- After [common exit conditions](#common-exit-conditions)

### Prevote Step (height:H,round:R)

@@ -152,10 +154,12 @@ Upon entering `Prevote`, each validator broadcasts its prevote vote.
- Else, if the proposal is invalid or wasn't received on time, it prevotes `<nil>`.

-The `Prevote` step ends: - After +2/3 prevotes for a particular block or
-`<nil>`. -->; goto `Precommit(H,R)` - After `timeoutPrevote` after
-receiving any +2/3 prevotes. --> goto `Precommit(H,R)` - After
-[common exit conditions](#common-exit-conditions)
+The `Prevote` step ends:
+- After +2/3 prevotes for a particular block or `<nil>`. -->; goto
+  `Precommit(H,R)`
+- After `timeoutPrevote` after receiving any +2/3 prevotes. --> goto
+  `Precommit(H,R)`
+- After [common exit conditions](#common-exit-conditions)

### Precommit Step (height:H,round:R)

@@ -163,17 +167,19 @@ Upon entering `Precommit`, each validator broadcasts its precommit vote.

- If the validator has a PoLC at `(H,R)` for a particular block `B`, it
  (re)locks (or changes lock to) and precommits `B` and sets
-  `LastLockRound = R`. - Else, if the validator has a PoLC at `(H,R)` for
-  `<nil>`, it unlocks and precommits `<nil>`. - Else, it keeps the lock
-  unchanged and precommits `<nil>`.
+  `LastLockRound = R`.
+- Else, if the validator has a PoLC at `(H,R)` for `<nil>`, it unlocks
+  and precommits `<nil>`.
+- Else, it keeps the lock unchanged and precommits `<nil>`.

A precommit for `<nil>` means "I didn’t see a PoLC for this round, but I
did get +2/3 prevotes and waited a bit".

-The Precommit step ends: - After +2/3 precommits for `<nil>`. -->
-goto `Propose(H,R+1)` - After `timeoutPrecommit` after receiving any
-+2/3 precommits. --> goto `Propose(H,R+1)` - After [common exit
-conditions](#common-exit-conditions)
+The Precommit step ends:
+- After +2/3 precommits for `<nil>`. --> goto `Propose(H,R+1)`
+- After `timeoutPrecommit` after receiving any +2/3 precommits. --> goto
+  `Propose(H,R+1)`
+- After [common exit conditions](#common-exit-conditions)

### Common exit conditions

diff --git a/docs/spec/consensus/light-client.md b/docs/spec/consensus/light-client.md
index 4b683b9a6..18dc280a3 100644
--- a/docs/spec/consensus/light-client.md
+++ b/docs/spec/consensus/light-client.md
@@ -1,113 +1,329 @@
-# Light Client
-
-A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
-about the blockchain application. In this document we describe mechanisms that ensures that the Tendermint light client
-has the same level of security as Full Node processes (without being itself a Full Node).
-
-To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
-Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the
-voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the
-core functionality of the light client is updating the current validator set, that is then used to verify the
-blockchain header, and further the corresponding Merkle proofs.
-
-For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
-Tendermint RPC:
-
-```golang
-Header(height int64) (SignedHeader, error) // returns signed header for the given height
-Validators(height int64) (ResultValidators, error) // returns validator set for the given height
-LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number
-
-type SignedHeader struct {
-    Header Header
-    Commit Commit
-    ValSetNumber int64
-}
+# Lite client

-type ResultValidators struct {
-    BlockHeight int64
-    Validators []Validator
-    // time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight
-    ValSetTime int64
-}
+A lite client is a process that connects to Tendermint full nodes and then tries to verify application data using the Merkle proofs.
+
+## Context of this document
+
+In order to make sure that full nodes have the incentive to follow the protocol, we have to address the following three issues:
+
+1) The lite client needs a method to verify headers it obtains from full nodes according to trust assumptions -- this document.
+
+2) The lite client must be able to connect to one correct full node to detect and report on failures in the trust assumptions (i.e., conflicting headers) -- a future document.
+
+3) In the event the trust assumption fails (i.e., a lite client is fooled by a conflicting header), the Tendermint fork accountability protocol must account for the evidence -- see #3840
+
+## Problem statement
+
+We assume that the lite client knows a (base) header *inithead* it trusts (by social consensus or because the lite client has decided to trust the header before). The goal is to check whether another header *newhead* can be trusted based on the data in *inithead*.
+
+The correctness of the protocol is based on the assumption that *inithead* was generated by an instance of Tendermint consensus. The term "trusting" above indicates that the correctness of the protocol depends on this assumption. It is the responsibility of the user who runs the lite client to make sure that the risk of trusting a corrupted/forged *inithead* is negligible.
+
+## Definitions
+
+### Data structures
+
+In the following, only the details of the data structures needed for this specification are given.
+
+ * header fields
+    - *height*
+    - *bfttime*: the chain time when the header (block) was generated
+    - *V*: validator set containing validators for this block.
+    - *NextV*: validator set for next block.
+    - *commit*: evidence that block with height *height* - 1 was committed by a set of validators (canonical commit). We will use ```signers(commit)``` to refer to the set of validators that committed the block.
+
+ * signed header fields: contains a header and a *commit* for the current header; a "seen commit". In the Tendermint consensus the "canonical commit" is stored in header *height* + 1.
+
+ * For each header *h* it has locally stored, the lite client stores whether
+   it trusts *h*. We write *trust(h) = true*, if this is the case.
+
+ * Validator fields.
We will write a validator as a tuple *(v,p)* such that + + *v* is the identifier (we assume identifiers are unique in each validator set) + + *p* is its voting power + + +### Functions + +For the purpose of this lite client specification, we assume that the Tendermint Full Node exposes the following function over Tendermint RPC: +```go + func Commit(height int64) (SignedHeader, error) + // returns signed header: header (with the fields from + // above) with Commit that include signatures of + // validators that signed the header + + + type SignedHeader struct { + Header Header + Commit Commit + } ``` -We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is -being assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers -the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time -as validator set init time. -Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely, -given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next -validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator -set), and then starting from the block `H+2`, it will be signed by the next validator set. - -Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function -names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more -clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to -`valSetNumber+1`. - -Locally, light client manages the following state: - -```golang -valSet []Validator // current validator set (last known and verified validator set) -valSetNumber int64 // sequence number of the current validator set -valSetHash []byte // hash of the current validator set -valSetTime int64 // time when the current validator set is initialised +### Definitions + +* *tp*: trusting period +* for realtime *t*, the predicate *correct(v,t)* is true if the validator *v* + follows the protocol until time *t* (we will see about recovery later). + + + + +### Tendermint Failure Model + +If a block *h* is generated at time *bfttime* (and this time is stored in the block), then a set of validators that hold more than 2/3 of the voting power in h.Header.NextV is correct until time h.Header.bfttime + tp. + +Formally, +\[ +\sum_{(v,p) \in h.Header.NextV \wedge correct(v,h.Header.bfttime + tp)} p > +2/3 \sum_{(v,p) \in h.Header.NextV} p +\] + +*Assumption*: "correct" is defined w.r.t. realtime (some Newtonian global notion of time, i.e., wall time), while *bfttime* corresponds to the reading of the local clock of a validator (how this time is computed may change when the Tendermint consensus is modified). In this note, we assume that all clocks are synchronized to realtime. We can make this more precise eventually (incorporating clock drift, accuracy, precision, etc.). Right now, we consider this assumption sufficient, as clock synchronization (under NTP) is in the order of milliseconds and *tp* is in the order of weeks. + +*Remark*: This failure model might change to a hybrid version that takes heights into account in the future. 
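+
+To make the formula above concrete, the following sketch (with illustrative
+types, not the actual Tendermint ones) expresses the same check: given the
+validator set *h.Header.NextV* and a predicate telling which validators are
+correct until *h.Header.bfttime + tp*, the correct validators must hold more
+than 2/3 of the total voting power.
+
+```go
+// validator is an illustrative stand-in for the (v,p) tuples above.
+type validator struct {
+	id    string
+	power int64
+}
+
+// holdsFailureModel reports whether the validators that are correct
+// until h.Header.bfttime + tp hold more than 2/3 of the voting power
+// in nextV, i.e. whether the Tendermint Failure Model holds for h.
+func holdsFailureModel(nextV []validator, correct func(id string) bool) bool {
+	var total, correctPower int64
+	for _, v := range nextV {
+		total += v.power
+		if correct(v.id) {
+			correctPower += v.power
+		}
+	}
+	// integer form of: correctPower > 2/3 * total
+	return 3*correctPower > 2*total
+}
+```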
+
+The specification in this document considers an implementation of the lite client under this assumption. Issues like *counter-factual signing* and *fork accountability* and *evidence submission* are mechanisms that justify this assumption by incentivizing validators to follow the protocol.
+If they don't, and we have more than 1/3 faults, safety may be violated. Our approach then is to *detect* these cases (after the fact), and take suitable repair actions (automatic and social). This is discussed in an upcoming document on "Fork accountability". (These safety violations include the lite client wrongly trusting a header, a fork in the blockchain, etc.)
+
+
+## Lite Client Trusting Spec
+
+The lite client communicates with a full node and learns new headers. The goal is to locally decide whether to trust a header. Our implementation needs to ensure the following two properties:
+
+- Lite Client Completeness: If header *h* was correctly generated by an instance of Tendermint consensus (and its age is less than the trusting period), then the lite client should eventually set *trust(h)* to true.
+
+- Lite Client Accuracy: If header *h* was *not generated* by an instance of Tendermint consensus, then the lite client should never set *trust(h)* to true.
+
+*Remark*: If in the course of the computation, the lite client obtains certainty that some headers were forged by adversaries (that is, were not generated by an instance of Tendermint consensus), it may submit (a subset of) the headers it has seen as evidence of misbehavior.
+
+*Remark*: In Completeness we use "eventually", while in practice *trust(h)* should be set to true before *h.Header.bfttime + tp*. If not, the block cannot be trusted because it is too old.
+
+*Remark*: If a header *h* is marked with *trust(h)*, but it is too old (its bfttime is more than *tp* ago), then the lite client should set *trust(h)* to false again.
+
+*Assumption*: Initially, the lite client has a header *inithead* that it trusts correctly, that is, *inithead* was correctly generated by the Tendermint consensus.
+
+To reason about the correctness, we may prove the following invariant.
+
+*Verification Condition: Lite Client Invariant.*
+ For each lite client *l* and each header *h*:
+if *l* has set *trust(h) = true*,
+ then validators that are correct until time *h.Header.bfttime + tp* have more than two thirds of the voting power in *h.Header.NextV*.
+
+ Formally,
+ \[
+ \sum_{(v,p) \in h.Header.NextV \wedge correct(v,h.Header.bfttime + tp)} p >
+ 2/3 \sum_{(v,p) \in h.Header.NextV} p
+ \]
+
+*Remark.* To prove the invariant, we will have to prove that the lite client only trusts headers that were correctly generated by Tendermint consensus; the formula above then follows from the Tendermint failure model.
+
+
+## High Level Solution
+
+Upon initialization, the lite client is given a header *inithead* it trusts (by
+social consensus). It is assumed that *inithead* satisfies the lite client invariant. (If *inithead* has been correctly generated by Tendermint consensus, the invariant follows from the Tendermint Failure Model.)
+
+When a lite client sees a signed new header *snh*, it has to decide whether to trust the new
+header. Trust can be obtained by (possibly) the combination of three methods.
+
+1. **Uninterrupted sequence of proof.** If a block is appended to the chain, where the last block
+is trusted (and properly committed by the old validator set in the next block),
+and the new block contains a new validator set, the new block is trusted if the lite client knows all headers in the prefix.
+Intuitively, a trusted validator set is assumed to only choose a new validator set that will obey the Tendermint Failure Model.
+
+2. **Trusting period.** Based on a trusted block *h*, and the lite client
+invariant, which ensures the fault assumption during the trusting period, we can check whether at least one validator that has been continuously correct from *h.Header.bfttime* until now has signed *snh*.
+If this is the case, similarly to above, the chosen validator set in *snh* does not violate the Tendermint Failure Model.
+
+3. **Bisection.** If a check according to the trusting period fails, the lite client can try to obtain a header *hp* whose height lies between *h* and *snh* in order to check whether *h* can be used to get trust for *hp*, and *hp* can be used to get trust for *snh*. If this is the case, we can trust *snh*; if not, we may continue recursively.
+
+## How to use it
+
+We consider the following use case:
+ the lite client wants to verify a header for some given height *k*. Thus:
+ - it requests the signed header for height *k* from a full node
+ - it tries to verify this header with the methods described here.
+
+This can be used in several settings:
+ - someone tells the lite client that application data that is relevant for it can be read in the block of height *k*.
+ - the lite client wants the latest state. It asks a full node for the current height, and uses the response for *k*.
+
+
+## Details
+
+*Assumptions*
+
+1. *tp < unbonding period*.
+2. *snh.Header.bfttime < now*
+3. *snh.Header.bfttime < h.Header.bfttime+tp*
+4. *trust(h)=true*
+
+
+**Observation 1.** If *h.Header.bfttime + tp > now*, we trust the old
+validator set *h.Header.NextV*.
+
+When we say we trust *h.Header.NextV* we do *not* trust that each individual validator in *h.Header.NextV* is correct, but we only trust the fact that at most 1/3 of them are faulty (more precisely, the faulty ones have at most 1/3 of the total voting power).
+
+
+
+### Functions
+
+The function *Bisection* checks whether to trust header *h2* based on the trusted header *h1*. It does so by calling
+the function *CheckSupport* in the process of
+bisection/recursion. *CheckSupport* implements the trusted period method and, for two adjacent headers (in terms of heights), it checks for an uninterrupted sequence of proof.
+
+*Assumption*: In the following, we assume that *h2.Header.height > h1.Header.height*. We will quickly discuss the other case in the next section.
+
+We consider the following set-up:
+- the lite client communicates with one full node
+- the lite client locally stores all the signed headers it obtained (trusted or not). In the pseudocode below we write *Store(header)* for this.
+- If *Bisection* returns *false*, then the lite client has seen a forged header.
+  * However, it does not know which header(s) is/are the problematic one(s).
+  * In this case, the lite client can submit (some of) the headers it has seen as evidence. As the lite client communicates with one full node only when executing Bisection, there are two cases:
+    - the full node is faulty
+    - the full node is correct and there was a fork in Tendermint consensus. Header *h1* is from a different branch than the one taken by the full node.
+      This case is not the focus of this document, but will be treated in the document on fork accountability.
+
+- the lite client must retry to retrieve correct headers from another full node
+  * it picks a new full node
+  * it restarts *Bisection*
+  * there might be optimizations; a lite client may not need to call *Commit(k)*, for a height *k* for which it already has a signed header it trusts.
+  * how to make sure that a lite client can communicate with a correct full node will be the focus of a separate document (recall Issue 3 from "Context of this document").
+
+**Auxiliary Functions.** We will use the function ```votingpower_in(V1,V2)``` to compute the voting power the validators in set V1 have according to their voting power in set V2;
+we will write ```totalVotingPower(V)``` for ```votingpower_in(V,V)```, which returns the total voting power in V.
+We further use the function ```signers(Commit)``` that returns the set of validators that signed the Commit.
+
+**CheckSupport.** The following function checks whether we can trust the header h2 based on header h1 following the trusting period method.
+
+```go
+ func CheckSupport(h1,h2,trustlevel) bool {
+   if h1.Header.bfttime + tp < now { // Observation 1
+     return false // old header was once trusted but it is expired
+   }
+   vp_all := totalVotingPower(h1.Header.NextV)
+     // total sum of voting power of validators in h2
+
+   if h2.Header.height == h1.Header.height + 1 {
+     // specific check for adjacent headers; everything must be
+     // properly signed.
+     // also check that h2.Header.V == h1.Header.NextV
+     // Plus the following check that 2/3 of the voting power
+     // in h1 signed h2
+     return (votingpower_in(signers(h2.Commit),h1.Header.NextV) >
+             2/3 * vp_all)
+       // signing validators are more than two thirds in h1.
+   }
+
+   return (votingpower_in(signers(h2.Commit),h1.Header.NextV) >
+           max(1/3,trustlevel) * vp_all)
+     // get validators in h1 that signed h2
+     // sum of voting powers in h1 of
+     // validators that signed h2
+     // is more than a third in h1
+ }
```
-The light client is initialised with the trusted validator set, for example based on the known validator set hash,
-validator set sequence number and the validator set init time.
-The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid,
-and 2) update the validator set (when the given header is valid and it is more recent than the seen headers).
+ *Remark*: Basic header verification must be done for *h2*. Similar checks are done in:
+ https://github.com/tendermint/tendermint/blob/master/types/validator_set.go#L591-L633
+
+ *Remark*: There are some sanity checks which are not in the code:
+ *h2.Header.height > h1.Header.height* and *h2.Header.bfttime > h1.Header.bfttime* and *h2.Header.bfttime < now*.
+
+ *Remark*: ```return (votingpower_in(signers(h2.Commit),h1.Header.NextV) > max(1/3,trustlevel) * vp_all)``` may return false even if *h2* was properly generated by Tendermint consensus in the case of big changes in the validator sets. However, the check ```return (votingpower_in(signers(h2.Commit),h1.Header.NextV) >
+ 2/3 * vp_all)``` must return true if *h1* and *h2* were generated by Tendermint consensus.
+
+*Remark*: The 1/3 check differs from a previously proposed method that was based on intersecting validator sets and checking that the new validator set contains "enough" correct validators. We found that the old check is not suited for realistic changes in the validator sets.
The new method is not only based on cardinalities, but also exploits that we can trust what is signed by a correct validator (i.e., signed by more than 1/3 of the voting power). -```golang -VerifyAndUpdate(signedHeader SignedHeader): - assertThat signedHeader.valSetNumber >= valSetNumber - if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then - setValidatorSet(signedHeader) +*Correctness arguments* + +Towards Lite Client Accuracy: +- Assume by contradiction that *h2* was not generated correctly and the lite client sets trust to true because *CheckSupport* returns true. +- h1 is trusted and sufficiently new +- by Tendermint Fault Model, less than 1/3 of voting power held by faulty validators => at least one correct validator *v* has signed *h2*. +- as *v* is correct up to now, it followed the Tendermint consensus protocol at least up to signing *h2* => *h2* was correctly generated, we arrive at the required contradiction. + + +Towards Lite Client Completeness: +- The check is successful if sufficiently many validators of *h1* are still validators in *h2* and signed *h2*. +- If *h2.Header.height = h1.Header.height + 1*, and both headers were generated correctly, the test passes + +*Verification Condition:* We may need a Tendermint invariant stating that if *h2.Header.height = h1.Header.height + 1* then *signers(h2.Commit) \subseteq h1.Header.NextV*. + +*Remark*: The variable *trustlevel* can be used if the user believes that relying on one correct validator is not sufficient. However, in case of (frequent) changes in the validator set, the higher the *trustlevel* is chosen, the more unlikely it becomes that CheckSupport returns true for non-adjacent headers. + +**Bisection.** The following function uses CheckSupport in a recursion to find intermediate headers that allow to establish a sequence of trust. 
+ + + + +```go +func Bisection(h1,h2,trustlevel) bool{ + if CheckSupport(h1,h2,trustlevel) { return true - else - updateValidatorSet(signedHeader.ValSetNumber) - return VerifyAndUpdate(signedHeader) - -isValid(signedHeader SignedHeader): - valSetOfTheHeader = Validators(signedHeader.Header.Height) - assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash - assertThat signedHeader is passing basic validation - if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true - else + } + if h2.Header.height == h1.Header.height + 1 { + // we have adjacent headers that are not matching (failed + // the CheckSupport) + // we could submit evidence here return false + } + pivot := (h1.Header.height + h2.Header.height) / 2 + hp := Commit(pivot) + // ask a full node for header of height pivot + Store(hp) + // store header hp locally + if Bisection(h1,hp,trustlevel) { + // only check right branch if hp is trusted + // (otherwise a lot of unnecessary computation may be done) + return Bisection(hp,h2,trustlevel) + } + else { + return false + } +} +``` -setValidatorSet(signedHeader SignedHeader): - nextValSet = Validators(signedHeader.Header.Height) - assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash - valSet = nextValSet.Validators - valSetHash = signedHeader.Header.ValidatorsHash - valSetNumber = signedHeader.ValSetNumber - valSetTime = nextValSet.ValSetTime - -votingPower(commit Commit): - votingPower = 0 - for each precommit in commit.Precommits do: - if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then - votingPower += valSet[precommit.ValidatorAddress].VotingPower - return votingPower - -votingPower(validatorSet []Validator): - for each validator in validatorSet do: - votingPower += validator.VotingPower - return votingPower - -updateValidatorSet(valSetNumberOfTheHeader): - while valSetNumber != valSetNumberOfTheHeader do - signedHeader = LastHeader(valSetNumber) - if isValid(signedHeader) then - setValidatorSet(signedHeader) - else return error - return -``` -Note that in the logic above we assume that the light client will always go upward with respect to header verifications, -i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older -headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent -checks fail, a light client need to implement some recovery strategy, for example connecting to other FullNode. + + +*Correctness arguments (sketch)* + +Lite Client Accuracy: +- Assume by contradiction that *h2* was not generated correctly and the lite client sets trust to true because Bisection returns true. +- Bisection returns true only if all calls to CheckSupport in the recursion return true. +- Thus we have a sequence of headers that all satisfied the CheckSupport +- again a contradiction + +Lite Client Completeness: + +This is only ensured if upon *Commit(pivot)* the lite client is always provided with a correctly generated header. + +*Stalling* + +With Bisection, a faulty full node could stall a lite client by creating a long sequence of headers that are queried one-by-one by the lite client and look OK, before the lite client eventually detects a problem. 
There are several ways to address this:
+* Each call to ```Commit``` could be issued to a different full node
+* Instead of querying header by header, the lite client tells a full node which header it trusts, and the height of the header it needs. The full node responds with the header along with a proof consisting of intermediate headers that the light client can use to verify. Roughly, Bisection would then be executed at the full node.
+* We may set a timeout on how long bisection may take.
+
+
+### The case *h2.Header.height < h1.Header.height*
+
+In the use case where someone tells the lite client that application data that is relevant for it can be read in the block of height *k* and the lite client trusts a more recent header, we can use the hashes to verify headers "down the chain." That is, we iterate down the heights and check the hashes in each step.
+
+*Remark.* For the case where the lite client trusts two headers *i* and *j* with *i < k < j*, we should discuss/experiment whether the forward or the backward method is more effective.
+
+```go
+func Backwards(h1,h2) bool {
+	assert (h2.Header.height < h1.Header.height)
+	old := h1
+	for i := h1.Header.height - 1; i > h2.Header.height; i-- {
+		new := Commit(i)
+		Store(new)
+		if (hash(new) != old.Header.hash) {
+			return false
+		}
+		old = new
+	}
+	return (hash(h2) == old.Header.hash)
+}
+```
diff --git a/docs/spec/p2p/config.md b/docs/spec/p2p/config.md
index b31a36736..7ff2b5e8d 100644
--- a/docs/spec/p2p/config.md
+++ b/docs/spec/p2p/config.md
@@ -12,14 +12,14 @@ and upon incoming connection shares some peers and disconnects.

## Seeds

-`--p2p.seeds “1.2.3.4:26656,2.3.4.5:4444”`
+`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”`

Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
If we already have enough peers in the address book, we may never need to dial them.

## Persistent Peers

-`--p2p.persistent_peers “1.2.3.4:26656,2.3.4.5:26656”`
+`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`

Dial these peers and auto-redial them if the connection fails.
These are intended to be trusted persistent peers that can help
@@ -30,9 +30,9 @@ backoff and will give up after a day of trying to connect.
the user will be warned that seeds may auto-close connections
and that the node may not be able to keep the connection persistent.

-## Private Persistent Peers
+## Private Peers

-`--p2p.private_persistent_peers “1.2.3.4:26656,2.3.4.5:26656”`
+`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`

-These are persistent peers that we do not add to the address book or
-gossip to other peers. They stay private to us.
+These are IDs of the peers that we do not add to the address book or gossip to
+other peers. They stay private to us.
diff --git a/docs/spec/p2p/connection.md b/docs/spec/p2p/connection.md
index 47366a549..fd2e7bc4d 100644
--- a/docs/spec/p2p/connection.md
+++ b/docs/spec/p2p/connection.md
@@ -61,7 +61,7 @@ func (m MConnection) TrySend(chID byte, msg interface{}) bool {}

`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
for the channel with the given id byte `chID`. The message `msg` is serialized
-using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
+using the `tendermint/go-amino` submodule's `WriteBinary()` reflection routine.
diff --git a/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-datastructs.png b/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-datastructs.png
new file mode 100644
index 000000000..1a92871a5
Binary files /dev/null and b/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-datastructs.png differ
diff --git a/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-fsm.png b/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-fsm.png
new file mode 100644
index 000000000..87d6fad93
Binary files /dev/null and b/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-fsm.png differ
diff --git a/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-goroutines.png b/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-goroutines.png
new file mode 100644
index 000000000..ee853ea93
Binary files /dev/null and b/docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-goroutines.png differ
diff --git a/docs/spec/reactors/block_sync/bcv1/impl-v1.md b/docs/spec/reactors/block_sync/bcv1/impl-v1.md
new file mode 100644
index 000000000..0ffaaea69
--- /dev/null
+++ b/docs/spec/reactors/block_sync/bcv1/impl-v1.md
@@ -0,0 +1,237 @@
+# Blockchain Reactor v1
+
+### Data Structures
+The data structures used are illustrated below.
+
+![Data Structures](img/bc-reactor-new-datastructs.png)
+
+#### BlockchainReactor
+- is a `p2p.BaseReactor`.
+- has a `store.BlockStore` for persistence.
+- executes blocks using an `sm.BlockExecutor`.
+- starts the FSM and the `poolRoutine()`.
+- relays the fast-sync responses and switch messages to the FSM.
+- handles errors from the FSM and, when necessary, reports them to the switch.
+- implements the blockchain reactor interface used by the FSM to send requests to peers, report errors to the switch, and reset state timers.
+- registers all the concrete types and interfaces for serialisation.
+
+```go
+type BlockchainReactor struct {
+	p2p.BaseReactor
+
+	initialState sm.State // immutable
+	state        sm.State
+
+	blockExec *sm.BlockExecutor
+	store     *store.BlockStore
+
+	fastSync bool
+
+	fsm          *BcReactorFSM
+	blocksSynced int
+
+	// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
+	messagesForFSMCh chan bcReactorMessage
+
+	// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
+	// to this channel to be processed in the context of the poolRoutine.
+	errorsForFSMCh chan bcReactorMessage
+
+	// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
+	// the switch.
+	eventsFromFSMCh chan bcFsmMessage
+}
+```
+
+#### BcReactorFSM
+- implements a simple finite state machine.
+- has a state and a state timer.
+- has a `BlockPool` to keep track of block requests sent to peers and blocks received from peers.
+- uses an interface to send status requests and block requests, and to report errors. The interface is implemented by the `BlockchainReactor` and by tests.
+
+```go
+type BcReactorFSM struct {
+	logger log.Logger
+	mtx    sync.Mutex
+
+	startTime time.Time
+
+	state      *bcReactorFSMState
+	stateTimer *time.Timer
+	pool       *BlockPool
+
+	// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, reporting errors, etc.
+	toBcR bcReactor
+}
+```
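+
+The state machine itself can be pictured as a set of named state records; a plausible shape for one is sketched below (the field names and the `bReactorEvent`/`bReactorEventData` types are assumptions for illustration, not the exact implementation):
+
+```go
+// Sketch: a state bundles an event handler, an optional entry action,
+// and the timeout after which a state timeout event is sent to the FSM.
+type bcReactorFSMState struct {
+	name    string
+	enter   func(fsm *BcReactorFSM) // executed when the state is entered
+	handle  func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error)
+	timeout time.Duration // duration of the state timer; zero means no timer
+}
+```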
+
+#### BlockPool
+- maintains a peer set, implemented as a map of peer ID to `BpPeer`.
+- maintains a set of requests made to peers, implemented as a map of block request heights to peer IDs.
+- maintains a list of future block requests needed to advance the fast-sync. This is a list of block heights.
+- keeps track of the maximum height of the peers in the set.
+- uses an interface to send requests and report errors to the reactor (via the FSM).
+
+```go
+type BlockPool struct {
+	logger log.Logger
+	// Set of peers that have sent status responses, with height bigger than pool.Height
+	peers map[p2p.ID]*BpPeer
+	// Set of block heights and the corresponding peers from where a block response is expected or has been received.
+	blocks map[int64]p2p.ID
+
+	plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
+	nextRequestHeight int64              // next height to be added to plannedRequests
+
+	Height        int64 // height of next block to execute
+	MaxPeerHeight int64 // maximum height of all peers
+	toBcR         bcReactor
+}
+```
+Some reasons for the `BlockPool` data structure content:
+1. If a peer is removed by the switch, fast access is required from the peer to the block requests made to that peer, in order to redo them.
+2. When block verification fails, fast access is required from the block height to the peer and to the block requests made to that peer, in order to redo them.
+3. The `BlockchainReactor` main routine decides when the block pool is running low and asks the `BlockPool` (via the FSM) to make more requests. The `BlockPool` creates a list of requests and triggers the sending of the block requests (via the interface). The reason it maintains a list of requests is the redo operations that may occur during error handling. These are redone when the `BlockchainReactor` requires more blocks.
+
+#### BpPeer
+- keeps track of a single peer, with height bigger than the initial height.
+- maintains the block requests made to the peer and the blocks received from the peer until they are executed.
+- monitors the peer speed when there are pending requests.
+- has an active timer while pending requests are present, and reports an error on timeout.
+
+```go
+type BpPeer struct {
+	logger log.Logger
+	ID     p2p.ID
+
+	Height                  int64                  // the peer reported height
+	NumPendingBlockRequests int                    // number of requests still waiting for block responses
+	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
+	blockResponseTimer      *time.Timer
+	recvMonitor             *flow.Monitor
+	params                  *BpPeerParams // parameters for timer and monitor
+
+	onErr func(err error, peerID p2p.ID) // function to call on error
+}
+```
+
+### Concurrency Model
+
+The diagram below shows the goroutines (depicted by the gray blocks), timers (shown on the left with their values) and channels (colored rectangles). The FSM box shows some of the functionality; it is not a separate goroutine.
+
+The interface used by the FSM is shown in light red with the `IF` block (a sketch of it follows this list). It is used to:
+- send block requests
+- report peer errors to the switch - this results in the reactor calling `switch.StopPeerForError()` and, if triggered by the peer timeout routine, a `removePeerEv` is sent to the FSM and action is taken from the context of the `poolRoutine()`
+- ask the reactor to reset the state timers. The timers are owned by the FSM, while the timeout routine is defined by the reactor. This was done in order to avoid running timers in tests, and will change in the next revision.
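+
+A plausible shape for that interface, sketched under the assumption that its method set simply mirrors the bullets above (the exact names may differ in the implementation):
+
+```go
+// Sketch of the reactor-side interface the FSM and block pool call into.
+type bcReactor interface {
+	sendStatusRequest()                                 // broadcast a StatusRequest to peers
+	sendBlockRequest(peerID p2p.ID, height int64) error // request a single block
+	sendPeerError(err error, peerID p2p.ID)             // report a bad peer to the switch
+	resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
+	switchToConsensus()                                 // fast sync is done
+}
+```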
+
+There are two main goroutines implemented by the blockchain reactor. All I/O operations are performed from the `poolRoutine()` context, while the CPU-intensive operations related to block execution are performed from the context of the `executeBlocksRoutine()`. All goroutines are detailed in the next sections.
+
+![Go Routines Diagram](img/bc-reactor-new-goroutines.png)
+
+#### Receive()
+Fast-sync messages from peers are received by this goroutine. It performs basic validation and:
+- in helper mode (i.e. for request messages) it replies immediately. This is different from the proposal in adr-040, which specifies that the FSM should handle these.
+- forwards response messages to the `poolRoutine()`.
+
+#### poolRoutine()
+(The name is kept from the previous reactor.)
+It starts the `executeBlocksRoutine()` and the FSM. It then waits in a loop for events (see the sketch after this list). These are received from the following channels:
+- `sendBlockRequestTicker.C` - every 10 msec the reactor asks the FSM to make more block requests, up to a maximum. Note: currently this value is constant, but it could be changed based on low/high watermark thresholds for the number of blocks received and waiting to be processed, the number of blockResponse messages waiting in messagesForFSMCh, etc.
+- `statusUpdateTicker.C` - every 10 seconds the reactor broadcasts status requests to peers. While adr-040 specifies this to run within the FSM, at this point this functionality is kept in the reactor.
+- `messagesForFSMCh` - the `Receive()` goroutine sends status and block response messages to this channel, and the reactor calls the FSM to handle them.
+- `errorsForFSMCh` - this channel receives the following events:
+  - peer remove - when the switch removes a peer
+  - state timeout event - when FSM state timers trigger
+  The reactor forwards these messages to the FSM.
+- `eventsFromFSMCh` - there are two types of events sent over this channel:
+  - `syncFinishedEv` - triggered when the FSM enters the `finished` state and calls the switchToConsensus() interface function.
+  - `peerErrorEv` - the peer timer expiry goroutine sends this event over the channel for processing from the poolRoutine() context.
+
+#### executeBlocksRoutine()
+Started by the `poolRoutine()`, it retrieves blocks from the pool and executes them:
+- `processReceivedBlockTicker.C` - a ticker event is received over the channel every 10 msec, and its handling results in a signal being sent to the doProcessBlockCh channel.
+- `doProcessBlockCh` - events are received on this channel as described above; upon processing, blocks are retrieved from the pool and executed.
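+
+To make the channel wiring concrete, a stripped-down sketch of the event loop is given below. The channel and ticker names follow the description above, but the handler bodies are elided to comments and the tickers are written as reactor fields, so this is an illustration rather than the actual implementation:
+
+```go
+// Sketch: the shape of the poolRoutine() event loop.
+func (bcR *BlockchainReactor) poolRoutine() {
+	go bcR.executeBlocksRoutine()
+	for {
+		select {
+		case <-bcR.sendBlockRequestTicker.C:
+			// ask the FSM (makeRequestsEv) for more block requests, up to a maximum
+		case <-bcR.statusUpdateTicker.C:
+			// broadcast a StatusRequest to all peers
+		case msg := <-bcR.messagesForFSMCh:
+			_ = msg // status/block responses from Receive(): fed to the FSM
+		case errMsg := <-bcR.errorsForFSMCh:
+			_ = errMsg // peer removals and state timeouts: fed to the FSM
+		case ev := <-bcR.eventsFromFSMCh:
+			_ = ev // syncFinishedEv / peerErrorEv reported by the FSM
+		}
+	}
+}
+```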
+
+### FSM
+
+![fsm](img/bc-reactor-new-fsm.png)
+
+#### States
+##### init (aka unknown)
+The FSM is created in the `unknown` state. When started by the reactor (`startFSMEv`), it broadcasts Status requests and transitions to the `waitForPeer` state.
+
+##### waitForPeer
+In this state, the FSM waits for a Status response from a "tall" peer. A timer is running in this state to allow the FSM to finish if there are no useful peers.
+
+If the timer expires, it moves to the `finished` state and calls the reactor to switch to consensus.
+If a Status response is received from a peer within the timeout, the FSM transitions to the `waitForBlock` state.
+
+##### waitForBlock
+In this state the FSM makes block requests (triggered by a ticker in the reactor) and waits for block responses. There is a timer running in this state to detect when a peer is not sending the block at the current processing height. If the timer expires, the FSM removes the peer where the request was sent and all requests made to that peer are redone.
+
+As blocks are received they are stored by the pool. Block execution is performed independently by the reactor and the result is reported to the FSM:
+- if there are no errors, the FSM increases the pool height and resets the state timer.
+- if there are errors, the peers that delivered the two blocks (at height and height+1) are removed and the requests are redone.
+
+In this state the FSM may receive peer remove events in any of the following scenarios:
+- the switch is removing a peer
+- a peer is penalized because it has not responded to some block requests for a long time
+- a peer is penalized for being slow
+
+When processing of the last block (the one with height equal to the highest peer height minus one) is successful, the FSM transitions to the `finished` state.
+If, after a peer update or removal, the pool height is the same as maxPeerHeight, the FSM transitions to the `finished` state.
+
+##### finished
+When entering this state, the FSM calls the reactor to switch to consensus and performs cleanup.
+
+#### Events
+
+The following events are handled by the FSM:
+
+```go
+const (
+	startFSMEv = iota + 1
+	statusResponseEv
+	blockResponseEv
+	processedBlockEv
+	makeRequestsEv
+	stopFSMEv
+	peerRemoveEv = iota + 256
+	stateTimeoutEv
+)
+```
+
+### Examples of Scenarios and Termination Handling
+A few scenarios are covered in this section together with the current/proposed handling. In general, the scenarios involving faulty peers are made worse by the fact that they may quickly be re-added.
+
+#### 1. No Tall Peers
+
+S: In this scenario a node is started and, while status responses are received, none of the peers are at a height higher than this node.
+
+H: The FSM times out in the `waitForPeer` state and moves to the `finished` state, where it calls the reactor to switch to consensus.
+
+#### 2. Typical Fast Sync
+
+S: A node fast syncs blocks from honest peers and eventually downloads and executes the penultimate block.
+
+H: The FSM in the `waitForBlock` state will receive the processedBlockEv from the reactor and detect that the termination height has been reached.
+
+#### 3. Peer Claims Big Height but no Blocks
+
+S: In this scenario a faulty peer claims a big height (for which there are no blocks).
+
+H: The requests for the non-existing block will time out, the peer is removed, and the pool's `MaxPeerHeight` is updated. The FSM checks whether the termination height has been reached whenever peers are removed.
+
+#### 4. Highest Peer Removed or Updated to Short
+
+S: The fast-sync node is caught up with all peers except one tall peer. The tall peer is removed, or it sends a status response with a low height.
+
+H: The FSM checks the termination condition on peer removals and updates.
+
+#### 5. Block At Current Height Delayed
+
+S: A peer can block the progress of fast sync by indefinitely delaying the block response for the current processing height (h1).
+
+H: Currently, given h1 < h2, there is no enforcement at the peer level that the response for h1 should be received before the one for h2. So a peer will time out only after delivering all blocks except h1. However, the `waitForBlock` state timer fires if the block for the current processing height is not received within a timeout. The peer is then removed and the requests to that peer (including the one for the current height) are redone.
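+
+The termination checks in scenarios 1-4 above reduce to one predicate; a minimal sketch, assuming the `Height` and `MaxPeerHeight` fields of the `BlockPool` shown earlier (the method name is illustrative):
+
+```go
+// Sketch: fast sync can terminate once the next block to execute would be
+// beyond the best height reported by any remaining peer.
+func (pool *BlockPool) reachedMaxHeight() bool {
+	return pool.Height >= pool.MaxPeerHeight
+}
+```
+
+Checking this predicate on processedBlockEv, as well as on every peer removal and status update, covers the scenarios above.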
diff --git a/docs/spec/reactors/block_sync/img/bc-reactor-routines.png b/docs/spec/reactors/block_sync/img/bc-reactor-routines.png new file mode 100644 index 000000000..3f574a79b Binary files /dev/null and b/docs/spec/reactors/block_sync/img/bc-reactor-routines.png differ diff --git a/docs/spec/reactors/block_sync/impl.md b/docs/spec/reactors/block_sync/impl.md index 195f9b862..35a37debb 100644 --- a/docs/spec/reactors/block_sync/impl.md +++ b/docs/spec/reactors/block_sync/impl.md @@ -1,4 +1,6 @@ -## Blockchain Reactor +## Blockchain Reactor v0 Modules + +### Blockchain Reactor - coordinates the pool for syncing - coordinates the store for persistence @@ -8,7 +10,7 @@ - starts the pool.Start() and its poolRoutine() - registers all the concrete types and interfaces for serialisation -### poolRoutine +#### poolRoutine - listens to these channels: - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends @@ -22,7 +24,7 @@ - implements Receive which is called by the switch/peer - calls AddBlock on the pool when it receives a new block from a peer -## Block Pool +### Block Pool - responsible for downloading blocks from peers - makeRequestersRoutine() @@ -36,6 +38,7 @@ - we receive a block - gotBlockCh is strange -## Block Store -- persists blocks to disk +### Go Routines in Blockchain Reactor + +![Go Routines Diagram](img/bc-reactor-routines.png) diff --git a/docs/spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md index 117fc5f2f..9c583ac0f 100644 --- a/docs/spec/reactors/mempool/messages.md +++ b/docs/spec/reactors/mempool/messages.md @@ -13,13 +13,13 @@ type TxMessage struct { } ``` -TxMessage is go-wire encoded and prepended with `0x1` as a -"type byte". This is followed by a go-wire encoded byte-slice. +TxMessage is go-amino encoded and prepended with `0x1` as a +"type byte". This is followed by a go-amino encoded byte-slice. Prefix of 40=0x28 byte tx is: `0x010128...` followed by the actual 40-byte tx. Prefix of 350=0x015e byte tx is: `0x0102015e...` followed by the actual 350 byte tx. -(Please see the [go-wire repo](https://github.com/tendermint/go-wire#an-interface-example) for more information) +(Please see the [go-amino repo](https://github.com/tendermint/go-amino#an-interface-example) for more information) ## RPC Messages diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md index d349fc7cc..7e9a2d8fe 100644 --- a/docs/spec/reactors/mempool/reactor.md +++ b/docs/spec/reactors/mempool/reactor.md @@ -7,7 +7,7 @@ See [this issue](https://github.com/tendermint/tendermint/issues/1503) Mempool maintains a cache of the last 10000 transactions to prevent replaying old transactions (plus transactions coming from other validators, who are continually exchanging transactions). Read [Replay -Protection](../../../../app-development.md#replay-protection) +Protection](../../../app-dev/app-development.md#replay-protection) for details. Sending incorrectly encoded data or data exceeding `maxMsgSize` will result diff --git a/docs/spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md index 26f1fa8bb..268b4a318 100644 --- a/docs/spec/reactors/pex/pex.md +++ b/docs/spec/reactors/pex/pex.md @@ -21,17 +21,20 @@ inbound (they dialed our public address) or outbound (we dialed them). ## Discovery Peer discovery begins with a list of seeds. -When we have no peers, or have been unable to find enough peers from existing ones, -we dial a randomly selected seed to get a list of peers to dial. 
+
+When we don't have enough peers, we
+
+1. ask existing peers
+2. dial seeds if we're not dialing anyone currently

 On startup, we will also immediately dial the given list of `persistent_peers`,
-and will attempt to maintain persistent connections with them. If the connections die, or we fail to dial,
-we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
-and after about a day of trying, stop dialing the peer.
+and will attempt to maintain persistent connections with them. If the
+connections die, or we fail to dial, we will redial every 5s for a few minutes,
+then switch to an exponential backoff schedule, and after about a day of
+trying, stop dialing the peer.

-So long as we have less than `MaxNumOutboundPeers`, we periodically request additional peers
-from each of our own. If sufficient time goes by and we still can't find enough peers,
-we try the seeds again.
+As long as we have less than `MaxNumOutboundPeers`, we periodically request
+additional peers from each of our own peers and try seeds.

 ## Listening
diff --git a/docs/spec/rpc/index.html b/docs/spec/rpc/index.html
new file mode 100644
index 000000000..d6b0fc5a9
--- /dev/null
+++ b/docs/spec/rpc/index.html
@@ -0,0 +1,25 @@
+<!-- Tendermint RPC: HTML page that renders swagger.yaml -->
diff --git a/docs/spec/rpc/swagger.yaml b/docs/spec/rpc/swagger.yaml
new file mode 100644
index 000000000..a276304a1
--- /dev/null
+++ b/docs/spec/rpc/swagger.yaml
@@ -0,0 +1,2679 @@
+swagger: "2.0"
+info:
+  version: "Master"
+  title: RPC client for Tendermint
+  description: A REST interface for state queries, transaction generation and broadcasting.
+  license:
+    name: Apache 2.0
+    url: https://github.com/tendermint/tendermint/blob/master/LICENSE
+tags:
+  - name: Websocket
+    description: Subscribe/unsubscribe are reserved for websocket events.
+  - name: Info
+    description: Information about the node APIs
+  - name: Tx
+    description: Transaction broadcast APIs
+  - name: ABCI
+    description: ABCI APIs
+  - name: Evidence
+    description: Evidence APIs
+schemes:
+  - https
+host: stargate.cosmos.network:26657
+securityDefinitions:
+  kms:
+    type: basic
+paths:
+  /broadcast_tx_sync:
+    get:
+      summary: Returns with the response from CheckTx. Does not wait for the DeliverTx result.
+      tags:
+        - Tx
+      operationId: broadcast_tx_sync
+      description: |
+        If you want to be sure that the transaction is included in a block, you can
+        subscribe for the result using JSONRPC via a websocket. See
+        https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html
+        If you haven't received anything after a couple of blocks, resend it. If the
+        same happens again, send it to some other node. A few reasons why it could
+        happen:
+
+        1. malicious node can drop or pretend it had committed your tx
+        2. malicious proposer (not necessarily the one you're communicating with)
+           can drop transactions, which might become valid in the future
+           (https://github.com/tendermint/tendermint/issues/3322)
+
+
+        Please refer to
+        https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
+        for formatting/encoding rules.
+      parameters:
+        - in: query
+          name: tx
+          type: string
+          required: true
+          description: The transaction
+          x-example: "456"
+      produces:
+        - application/json
+      responses:
+        200:
+          description: empty answer
+          schema:
+            $ref: "#/definitions/BroadcastTxResponse"
+        500:
+          description: empty error
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+  /broadcast_tx_async:
+    get:
+      summary: Returns right away, with no response. Does not wait for CheckTx or DeliverTx results.
+      tags:
+        - Tx
+      operationId: broadcast_tx_async
+      description: |
+        If you want to be sure that the transaction is included in a block, you can
+        subscribe for the result using JSONRPC via a websocket. See
+        https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html
+        If you haven't received anything after a couple of blocks, resend it. If the
+        same happens again, send it to some other node. A few reasons why it could
+        happen:
+
+        1. malicious node can drop or pretend it had committed your tx
+        2. malicious proposer (not necessarily the one you're communicating with)
+           can drop transactions, which might become valid in the future
+           (https://github.com/tendermint/tendermint/issues/3322)
+        3. node can be offline
+
+        Please refer to
+        https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
+        for formatting/encoding rules.
+ parameters: + - in: query + name: tx + type: string + required: true + description: The transaction + x-example: "123" + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/BroadcastTxResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /broadcast_tx_commit: + get: + summary: Returns with the responses from CheckTx and DeliverTx. + tags: + - Tx + operationId: broadcast_tx_commit + description: | + IMPORTANT: use only for testing and development. In production, use + BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction + result using JSONRPC via a websocket. See + https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html + + CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout + waiting for tx to commit. + + If CheckTx or DeliverTx fail, no error will be returned, but the returned result + will contain a non-OK ABCI code. + + Please refer to + https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. + parameters: + - in: query + name: tx + type: string + required: true + description: The transaction + x-example: "785" + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/BroadcastTxCommitResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /subscribe: + get: + summary: Subscribe for events via WebSocket. + tags: + - Websocket + operationId: subscribe + description: | + To tell which events you want, you need to provide a query. query is a + string, which has a form: "condition AND condition ..." (no OR at the + moment). condition has a form: "key operation operand". key is a string with + a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). + operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a + string (escaped with single quotes), number, date or time. + + Examples: + tm.event = 'NewBlock' # new blocks + tm.event = 'CompleteProposal' # node got a complete proposal + tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction + tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block + tx.height = 5 # all txs of the fifth block + + Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height. + Note for transactions, you can define additional keys by providing events with + DeliverTx response. 
+ + import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/pubsub/query" + ) + + abci.ResponseDeliverTx{ + Events: []abci.Event{ + { + Type: "rewards.withdraw", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("AddrA")}, + cmn.KVPair{Key: []byte("source"), Value: []byte("SrcX")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, + }, + }, + { + Type: "rewards.withdraw", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("AddrB")}, + cmn.KVPair{Key: []byte("source"), Value: []byte("SrcY")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, + }, + }, + { + Type: "transfer", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("sender"), Value: []byte("AddrC")}, + cmn.KVPair{Key: []byte("recipient"), Value: []byte("AddrD")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + }, + }, + }, + } + + All events are indexed by a composite key of the form {eventType}.{evenAttrKey}. + In the above examples, the following keys would be indexed: + - rewards.withdraw.address + - rewards.withdraw.source + - rewards.withdraw.amount + - rewards.withdraw.balance + - transfer.sender + - transfer.recipient + - transfer.amount + + Multiple event types with duplicate keys are allowed and are meant to + categorize unique and distinct events. In the above example, all events + indexed under the key `rewards.withdraw.address` will have the following + values stored and queryable: + + - AddrA + - AddrB + + To create a query for txs where address AddrA withdrew rewards: + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'") + + To create a query for txs where address AddrA withdrew rewards from source Y: + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'Y'") + + To create a query for txs where AddrA transferred funds: + query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrA'") + + The following queries would return no results: + query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrZ'") + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'") + query.MustParse("tm.event = 'Tx' AND rewards.withdraw.source = 'W'") + + See list of all possible events here + https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants + + For complete query syntax, check out + https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query. + + ```go + import "github.com/tendermint/tendermint/types" + + client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + err := client.Start() + if err != nil { + handle error + } + defer client.Stop() + ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) + defer cancel() + query := "tm.event = 'Tx' AND tx.height = 3" + txs, err := client.Subscribe(ctx, "test-client", query) + if err != nil { + handle error + } + + go func() { + for e := range txs { + fmt.Println("got ", e.Data.(types.EventDataTx)) + } + }() + ``` + parameters: + - in: query + name: query + type: string + required: true + description: | + query is a string, which has a form: "condition AND condition ..." (no OR at the + moment). condition has a form: "key operation operand". key is a string with + a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). + operation can be "=", "<", "<=", ">", ">=", "CONTAINS". 
operand can be a + string (escaped with single quotes), number, date or time. + x-example: tm.event = 'Tx' AND tx.height = 5 + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /unsubscribe: + get: + summary: Unsubscribe from event on Websocket + tags: + - Websocket + operationId: unsubscribe + description: | + ```go + client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + err := client.Start() + if err != nil { + handle error + } + defer client.Stop() + query := "tm.event = 'Tx' AND tx.height = 3" + err = client.Unsubscribe(context.Background(), "test-client", query) + if err != nil { + handle error + } + ``` + parameters: + - in: query + name: query + type: string + required: true + description: | + query is a string, which has a form: "condition AND condition ..." (no OR at the + moment). condition has a form: "key operation operand". key is a string with + a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). + operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a + string (escaped with single quotes), number, date or time. + x-example: tm.event = 'Tx' AND tx.height = 5 + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /unsubscribe_all: + get: + summary: Unsubscribe from all events via WebSocket + tags: + - Websocket + operationId: unsubscribe_all + description: | + Unsubscribe from all events via WebSocket + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /health: + get: + summary: Node heartbeat + tags: + - Info + operationId: health + description: | + Get node health. Returns empty result (200 OK) on success, no response - in case of an error. + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/EmptyResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /status: + get: + summary: Node Status + operationId: status + tags: + - Info + description: | + Get Tendermint status including node info, pubkey, latest block hash, app hash, block height and time. + produces: + - application/json + responses: + 200: + description: Status of the node + schema: + $ref: "#/definitions/StatusResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /net_info: + get: + summary: Network informations + operationId: net_info + tags: + - Info + description: | + Get network info. + produces: + - application/json + responses: + 200: + description: empty answer + schema: + $ref: "#/definitions/NetInfoResponse" + 500: + description: empty error + schema: + $ref: "#/definitions/ErrorResponse" + /blockchain: + get: + summary: Get block headers for minHeight <= height <= maxHeight. + operationId: blockchain + parameters: + - in: query + name: minHeight + type: number + description: Minimum block height to return + x-example: 1 + - in: query + name: maxHeight + type: number + description: Maximum block height to return + x-example: 2 + tags: + - Info + description: | + Get Blockchain info. 
+ produces: + - application/json + responses: + 200: + description: Block headers, returned in descending order (highest first). + schema: + $ref: "#/definitions/BlockchainResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /block: + get: + summary: Get block at a specified height + operationId: block + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get Block. + produces: + - application/json + responses: + 200: + description: Block informations. + schema: + $ref: "#/definitions/BlockResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /block_results: + get: + summary: Get block results at a specified height + operationId: block_results + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch informations regarding the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get block_results. + produces: + - application/json + responses: + 200: + description: Block results. + schema: + $ref: "#/definitions/BlockResultsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /commit: + get: + summary: Get commit results at a specified height + operationId: commit + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get Commit. + produces: + - application/json + responses: + 200: + description: Commit results. + schema: + $ref: "#/definitions/CommitResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /validators: + get: + summary: Get validator set at a specified height + operationId: validators + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch validato set at the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get Validators. + produces: + - application/json + responses: + 200: + description: Commit results. + schema: + $ref: "#/definitions/ValidatorsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /genesis: + get: + summary: Get Genesis + operationId: genesis + tags: + - Info + description: | + Get genesis. + produces: + - application/json + responses: + 200: + description: Genesis results. + schema: + $ref: "#/definitions/GenesisResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /dump_consensus_state: + get: + summary: Get consensus state + operationId: dump_consensus_state + tags: + - Info + description: | + Get consensus state. + produces: + - application/json + responses: + 200: + description: consensus state results. + schema: + $ref: "#/definitions/DumpConsensusResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /consensus_state: + get: + summary: Get consensus state + operationId: consensus_state + tags: + - Info + description: | + Get consensus state. + produces: + - application/json + responses: + 200: + description: consensus state results. 
+ schema: + $ref: "#/definitions/ConsensusStateResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /consensus_params: + get: + summary: Get consensus parameters + operationId: consensus_params + parameters: + - in: query + name: height + type: number + description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. 0 means latest + default: 0 + x-example: 1 + tags: + - Info + description: | + Get consensus parameters. + produces: + - application/json + responses: + 200: + description: consensus parameters results. + schema: + $ref: "#/definitions/ConsensusParamsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /unconfirmed_txs: + get: + summary: Get the list of unconfirmed transactions + operationId: unconfirmed_txs + parameters: + - in: query + name: limit + type: number + description: Maximum number of unconfirmed transactions to return + x-example: 1 + tags: + - Info + description: | + Get list of unconfirmed transactions + produces: + - application/json + responses: + 200: + description: List of unconfirmed transactions + schema: + $ref: "#/definitions/UnconfirmedTransactionsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /num_unconfirmed_txs: + get: + summary: Get data about unconfirmed transactions + operationId: num_unconfirmed_txs + tags: + - Info + description: | + Get data about unconfirmed transactions + produces: + - application/json + responses: + 200: + description: status about unconfirmed transactions + schema: + $ref: "#/definitions/NumUnconfirmedTransactionsResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /tx_search: + get: + summary: Search for transactions + operationId: tx_search + parameters: + - in: query + name: query + type: string + description: Query + required: true + x-example: "tx.height=1000" + - in: query + name: prove + type: boolean + description: Include proofs of the transactions inclusion in the block + required: false + x-example: true + default: false + - in: query + name: page + type: number + description: "Page number (1-based)" + required: false + x-example: 1 + default: 1 + - in: query + name: per_page + type: number + description: "Number of entries per page (max: 100)" + required: false + x-example: 30 + default: 30 + tags: + - Info + description: | + Get list of unconfirmed transactions + produces: + - application/json + responses: + 200: + description: List of unconfirmed transactions + schema: + $ref: "#/definitions/TxSearchResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /tx: + get: + summary: Get transactions by hash + operationId: tx + parameters: + - in: query + name: hash + type: string + description: transaction Hash to retrive + required: true + x-example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + - in: query + name: prove + type: boolean + description: Include proofs of the transactions inclusion in the block + required: false + x-example: true + default: false + tags: + - Info + description: | + Get a trasasction + produces: + - application/json + responses: + 200: + description: Get a transaction + schema: + $ref: "#/definitions/TxResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /abci_info: + get: + summary: Get some info about the application. 
+ operationId: abci_info + tags: + - ABCI + description: | + Get some info about the application. + produces: + - application/json + responses: + 200: + description: Get some info about the application. + schema: + $ref: "#/definitions/ABCIInfoResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + /abci_query: + get: + summary: Query the application for some information. + operationId: abci_query + parameters: + - in: query + name: path + type: string + description: Path to the data ("/a/b/c") + required: true + x-example: "/a/b/c" + - in: query + name: data + type: string + description: Data + required: true + x-example: "IHAVENOIDEA" + - in: query + name: height + type: number + description: Height (0 means latest) + required: false + x-example: 1 + default: 0 + - in: query + name: prove + type: boolean + description: Include proofs of the transactions inclusion in the block + required: false + x-example: true + default: false + tags: + - ABCI + description: | + Query the application for some information. + produces: + - application/json + responses: + 200: + description: Response of the submitted query + schema: + $ref: "#/definitions/ABCIQueryResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + + /broadcast_evidence: + get: + summary: Broadcast evidence of the misbehavior. + operationId: broadcast_evidence + parameters: + - in: query + name: evidence + type: string + description: Amino-encoded JSON evidence + required: true + x-example: "JSON_EVIDENCE_Amino_encoded" + tags: + - Info + description: | + Broadcast evidence of the misbehavior. + produces: + - application/json + responses: + 200: + description: Broadcast evidence of the misbehavior. + schema: + $ref: "#/definitions/BroadcastEvidenceResponse" + 500: + description: Error + schema: + $ref: "#/definitions/ErrorResponse" + +definitions: + JSONRPC: + type: object + properties: + id: + type: string + x-example: "" + jsonrpc: + type: string + x-example: "2.0" + EmptyResponse: + description: Empty Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + type: object + additionalProperties: {} + ErrorResponse: + description: Error Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + error: + type: string + x-example: "Description of failure" + ProtocolVersion: + type: object + properties: + p2p: + type: string + x-example: "7" + block: + type: string + x-example: "10" + app: + type: string + x-example: "0" + PubKey: + type: object + properties: + type: + type: string + x-example: "tendermint/PubKeyEd25519" + value: + type: string + x-example: "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4=" + NodeInfo: + type: object + properties: + protocol_version: + $ref: "#/definitions/ProtocolVersion" + id: + type: string + x-example: "5576458aef205977e18fd50b274e9b5d9014525a" + listen_addr: + type: string + x-example: "tcp:0.0.0.0:26656" + network: + type: string + x-example: "cosmoshub-2" + version: + type: string + x-example: "0.32.1" + channels: + type: string + x-example: "4020212223303800" + moniker: + type: string + x-example: "moniker-node" + other: + type: object + properties: + tx_index: + type: string + x-example: "on" + rpc_address: + type: string + x-example: "tcp:0.0.0.0:26657" + x-example: "moniker-node" + SyncInfo: + type: object + properties: + latest_block_hash: + type: string + x-example: "790BA84C3545FCCC49A5C629CEE6EA58A6E875C3862175BDC11EE7AF54703501" + latest_app_hash: + type: string + 
x-example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" + latest_block_height: + type: string + x-example: "1262196" + latest_block_time: + type: string + x-example: "2019-08-01T11:52:22.818762194Z" + catching_up: + type: boolean + x-example: false + ValidatorInfo: + type: object + properties: + address: + type: string + x-example: "5D6A51A8E9899C44079C6AF90618BA0369070E6E" + pub_key: + $ref: "#/definitions/PubKey" + voting_power: + type: string + x-example: "0" + Status: + description: Status Response + type: object + properties: + node_info: + $ref: "#/definitions/NodeInfo" + sync_info: + $ref: "#/definitions/SyncInfo" + validator_info: + $ref: "#/definitions/ValidatorInfo" + StatusResponse: + description: Status Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/Status" + Monitor: + type: object + properties: + Active: + type: boolean + x-example: true + Start: + type: string + x-example: "2019-07-31T14:31:28.66Z" + Duration: + type: string + x-example: "168901060000000" + Idle: + type: string + x-example: "168901040000000" + Bytes: + type: string + x-example: "5" + Samples: + type: string + x-example: "1" + InstRate: + type: string + x-example: "0" + CurRate: + type: string + x-example: "0" + AvgRate: + type: string + x-example: "0" + PeakRate: + type: string + x-example: "0" + BytesRem: + type: string + x-example: "0" + TimeRem: + type: string + x-example: "0" + Progress: + type: number + x-example: 0 + Channel: + type: object + properties: + ID: + type: number + x-example: 48 + SendQueueCapacity: + type: string + x-example: "1" + SendQueueSize: + type: string + x-example: "0" + Priority: + type: string + x-example: "5" + RecentlySent: + type: string + x-example: "0" + ConnectionStatus: + type: object + properties: + Duration: + type: string + x-example: "168901057956119" + SendMonitor: + $ref: "#/definitions/Monitor" + RecvMonitor: + $ref: "#/definitions/Monitor" + Channels: + type: array + items: + $ref: "#/definitions/Channel" + Peer: + type: object + properties: + node_info: + $ref: "#/definitions/NodeInfo" + is_outbound: + type: boolean + x-example: true + connection_status: + $ref: "#/definitions/ConnectionStatus" + remote_ip: + type: string + x-example: "95.179.155.35" + NetInfo: + type: object + properties: + listening: + type: boolean + x-example: true + listeners: + type: array + items: + type: string + x-example: "Listener(@)" + n_peers: + type: number + x-example: "1" + peers: + type: array + items: + $ref: "#/definitions/Peer" + NetInfoResponse: + description: NetInfo Response + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/NetInfo" + BlockID: + required: + - "hash" + - "parts" + properties: + hash: + type: string + x-example: "D82C2734BB0E76C772A10994B210EF9D11505D1B98CB189D9CF7F9A5488672A5" + parts: + required: + - "total" + - "hash" + properties: + total: + type: string + x-example: "1" + hash: + type: string + x-example: "CB02DCAA7FB46BF874052EC2273FD0B1F2CF2E1593298D9781E60FE9C3DB8638" + type: object + type: object + BlockMetaHeader: + required: + - "version" + - "chain_id" + - "height" + - "time" + - "num_txs" + - "total_txs" + - "last_block_id" + - "last_commit_hash" + - "data_hash" + - "validators_hash" + - "next_validators_hash" + - "consensus_hash" + - "app_hash" + - "last_results_hash" + - "evidence_hash" + - "proposer_address" + properties: + version: + required: + - "block" + - "app" + properties: + block: + type: 
string + x-example: "10" + app: + type: string + x-example: "0" + type: object + chain_id: + type: string + x-example: "cosmoshub-2" + height: + type: string + x-example: "12" + time: + type: string + x-example: "2019-04-22T17:01:51.701356223Z" + num_txs: + type: string + x-example: "2" + total_txs: + type: string + x-example: "3" + last_block_id: + $ref: "#/definitions/BlockID" + last_commit_hash: + type: string + x-example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" + data_hash: + type: string + x-example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" + validators_hash: + type: string + x-example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + next_validators_hash: + type: string + x-example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + consensus_hash: + type: string + x-example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" + app_hash: + type: string + x-example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" + last_results_hash: + type: string + x-example: "" + evidence_hash: + type: string + x-example: "" + proposer_address: + type: string + x-example: "D540AB022088612AC74B287D076DBFBC4A377A2E" + type: object + BlockMetaId: + required: + - "hash" + - "parts" + properties: + hash: + type: string + x-example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: string + x-example: "1" + hash: + type: string + x-example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: object + type: object + BlockMeta: + type: object + properties: + block_id: + $ref: "#/definitions/BlockMetaId" + header: + $ref: "#/definitions/BlockMetaHeader" + Blockchain: + type: object + required: + - "last_height" + - "block_metas" + properties: + last_height: + type: string + x-example: "1276718" + block_metas: + type: "array" + items: + $ref: "#/definitions/BlockMeta" + BlockchainResponse: + description: Blockchain info + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/Blockchain" + Commit: + required: + - "type" + - "height" + - "round" + - "block_id" + - "timestamp" + - "validator_address" + - "validator_index" + - "signature" + properties: + type: + type: number + x-example: 2 + height: + type: string + x-example: "1262085" + round: + type: string + x-example: "0" + block_id: + $ref: "#/definitions/BlockID" + timestamp: + type: string + x-example: "2019-08-01T11:39:38.867269833Z" + validator_address: + type: string + x-example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + validator_index: + type: string + x-example: "0" + signature: + type: string + x-example: "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + Block: + type: object + properties: + header: + $ref: "#/definitions/BlockMetaHeader" + data: + type: array + items: + type: string + x-example: "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0=" + evidence: + type: array + items: + $ref: "#/definitions/Evidence" + last_commit: + type: object + properties: + block_id: + $ref: "#/definitions/BlockID" + precommits: + type: array + items: + $ref: "#/definitions/Commit" + Validator: + type: object + 
properties: + pub_key: + $ref: "#/definitions/PubKey" + voting_power: + type: number + address: + type: string + Evidence: + type: object + properties: + type: + type: string + height: + type: number + time: + type: number + total_voting_power: + type: number + validator: + $ref: "#/definitions/Validator" + BlockComplete: + type: object + properties: + block_meta: + $ref: "#/definitions/BlockMeta" + block: + $ref: "#/definitions/Block" + BlockResponse: + description: Blockc info + allOf: + - $ref: "#/definitions/JSONRPC" + - type: object + properties: + result: + $ref: "#/definitions/BlockComplete" + Tag: + type: object + properties: + key: + type: string + example: "YWN0aW9u" + value: + type: string + example: "c2VuZA==" + ################## FROM NOW ON NEEDS REFACTOR ################## + BlockResultsResponse: + type: "object" + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "height" + - "results" + properties: + height: + type: "string" + example: "12" + results: + required: + - "deliver_tx" + - "end_block" + - "begin_block" + properties: + deliver_tx: + type: "array" + x-nullable: true + items: + type: "object" + properties: + log: + type: "string" + example: '[{"msg_index":"0","success":true,"log":""}]' + gasWanted: + type: "string" + example: "25629" + gasUsed: + type: "string" + example: "25629" + tags: + type: "array" + items: + type: "object" + properties: + key: + type: "string" + example: "YWN0aW9u" + value: + type: "string" + example: "c2VuZA==" + end_block: + required: + - "validator_updates" + properties: {} + type: "object" + begin_block: + properties: {} + type: "object" + type: "object" + type: "object" + CommitResponse: + type: "object" + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "signed_header" + - "canonical" + properties: + signed_header: + required: + - "header" + - "commit" + properties: + header: + required: + - "version" + - "chain_id" + - "height" + - "time" + - "num_txs" + - "total_txs" + - "last_block_id" + - "last_commit_hash" + - "data_hash" + - "validators_hash" + - "next_validators_hash" + - "consensus_hash" + - "app_hash" + - "last_results_hash" + - "evidence_hash" + - "proposer_address" + properties: + version: + required: + - "block" + - "app" + properties: + block: + type: "string" + example: "10" + app: + type: "string" + example: "0" + type: "object" + chain_id: + type: "string" + example: "cosmoshub-2" + height: + type: "string" + example: "12" + time: + type: "string" + example: "2019-04-22T17:01:51.701356223Z" + num_txs: + type: "string" + example: "2" + total_txs: + type: "string" + example: "3" + last_block_id: + required: + - "hash" + - "parts" + properties: + hash: + type: "string" + example: "D82C2734BB0E76C772A10994B210EF9D11505D1B98CB189D9CF7F9A5488672A5" + parts: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "1" + hash: + type: "string" + example: "CB02DCAA7FB46BF874052EC2273FD0B1F2CF2E1593298D9781E60FE9C3DB8638" + type: "object" + type: "object" + last_commit_hash: + type: "string" + example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" + data_hash: + type: "string" + example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" + validators_hash: + type: "string" + example: 
"D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + next_validators_hash: + type: "string" + example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + consensus_hash: + type: "string" + example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" + app_hash: + type: "string" + example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" + last_results_hash: + type: "string" + example: "" + evidence_hash: + type: "string" + example: "" + proposer_address: + type: "string" + example: "D540AB022088612AC74B287D076DBFBC4A377A2E" + type: "object" + commit: + required: + - "block_id" + - "precommits" + properties: + block_id: + required: + - "hash" + - "parts" + properties: + hash: + type: "string" + example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "1" + hash: + type: "string" + example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: "object" + type: "object" + precommits: + type: "array" + items: + type: "object" + properties: + type: + type: "number" + example: 2 + height: + type: "string" + example: "12" + round: + type: "string" + example: "0" + block_id: + required: + - "hash" + - "parts" + properties: + hash: + type: "string" + example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "1" + hash: + type: "string" + example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: "object" + type: "object" + timestamp: + type: "string" + example: "2019-04-22T17:01:58.376629719Z" + validator_address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + validator_index: + type: "string" + example: "0" + signature: + type: "string" + example: "14jaTQXYRt8kbLKEhdHq7AXycrFImiLuZx50uOjs2+Zv+2i7RTG/jnObD07Jo2ubZ8xd7bNBJMqkgtkd0oQHAw==" + type: "object" + type: "object" + canonical: + type: "boolean" + example: true + type: "object" + ValidatorsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "block_height" + - "validators" + properties: + block_height: + type: "string" + example: "55" + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: "object" + voting_power: + type: "string" + example: "250353" + proposer_priority: + type: "string" + example: "13769415" + type: "object" + GenesisResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "genesis" + properties: + genesis: + required: + - "genesis_time" + - "chain_id" + - "consensus_params" + - "validators" + - "app_hash" + properties: + genesis_time: + type: "string" + example: "2019-04-22T17:00:00Z" + chain_id: + type: "string" + example: "cosmoshub-2" + consensus_params: + required: + - "block" + - "evidence" + - "validator" + properties: + block: + required: + - "max_bytes" + - "max_gas" + - 
"time_iota_ms" + properties: + max_bytes: + type: "string" + example: "200000" + max_gas: + type: "string" + example: "2000000" + time_iota_ms: + type: "string" + example: "1000" + type: "object" + evidence: + required: + - "max_age" + properties: + max_age: + type: "string" + example: "1000000" + type: "object" + validator: + required: + - "pub_key_types" + properties: + pub_key_types: + type: "array" + items: + type: "string" + example: + - "ed25519" + type: "object" + type: "object" + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" + type: "object" + power: + type: "string" + example: "9328525" + name: + type: "string" + example: "Certus One" + app_hash: + type: "string" + example: "" + app_state: + properties: {} + type: "object" + type: "object" + type: "object" + DumpConsensusResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "round_state" + - "peers" + properties: + round_state: + required: + - "height" + - "round" + - "step" + - "start_time" + - "commit_time" + - "validators" + - "proposal" + - "proposal_block" + - "proposal_block_parts" + - "locked_round" + - "locked_block" + - "locked_block_parts" + - "valid_round" + - "valid_block" + - "valid_block_parts" + - "votes" + - "commit_round" + - "last_commit" + - "last_validators" + - "triggered_timeout_precommit" + properties: + height: + type: "string" + example: "1311801" + round: + type: "string" + example: "0" + step: + type: "number" + example: 3 + start_time: + type: "string" + example: "2019-08-05T11:28:49.064658805Z" + commit_time: + type: "string" + example: "2019-08-05T11:28:44.064658805Z" + validators: + required: + - "validators" + - "proposer" + properties: + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: "object" + voting_power: + type: "string" + example: "239727" + proposer_priority: + type: "string" + example: "-11896414" + proposer: + required: + - "address" + - "pub_key" + - "voting_power" + - "proposer_priority" + properties: + address: + type: "string" + example: "708FDDCE121CDADA502F2B0252FEF13FDAA31E50" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "VNMNfw7mrQBSpEvCtA9ykOe6BoR00RM9b/a9v3vXZhY=" + type: "object" + voting_power: + type: "string" + example: "295360" + proposer_priority: + type: "string" + example: "-88886833" + type: "object" + type: "object" + locked_round: + type: "string" + example: "-1" + valid_round: + type: "string" + example: "-1" + votes: + type: "array" + items: + type: "object" + properties: + round: + type: "string" + example: "0" + prevotes: + type: "array" + x-nullable: true + items: + type: "string" + example: + - "nil-Vote" + - "Vote{19:46A3F8B8393B 1311801/00/1(Prevote) 000000000000 64CE682305CB @ 
2019-08-05T11:28:47.374703444Z}" + prevotes_bit_array: + type: "string" + example: "BA{100:___________________x________________________________________________________________________________} 209706/170220253 = 0.00" + precommits: + type: "array" + x-nullable: true + items: + type: "string" + example: + - "nil-Vote" + precommits_bit_array: + type: "string" + example: "BA{100:____________________________________________________________________________________________________} 0/170220253 = 0.00" + commit_round: + type: "string" + example: "-1" + last_commit: + x-nullable: true + required: + - "votes" + - "votes_bit_array" + - "peer_maj_23s" + properties: + votes: + type: "array" + items: + type: "string" + example: + - "Vote{0:000001E443FD 1311800/00/2(Precommit) 3071ADB27D1A 77EE1B6B6847 @ 2019-08-05T11:28:43.810128139Z}" + votes_bit_array: + type: "string" + example: "BA{100:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} 170220253/170220253 = 1.00" + peer_maj_23s: + properties: {} + type: "object" + type: "object" + last_validators: + required: + - "validators" + - "proposer" + properties: + validators: + type: "array" + items: + type: "object" + properties: + address: + type: "string" + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: "object" + voting_power: + type: "string" + example: "239727" + proposer_priority: + type: "string" + example: "-12136141" + proposer: + required: + - "address" + - "pub_key" + - "voting_power" + - "proposer_priority" + properties: + address: + type: "string" + example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: "string" + example: "tendermint/PubKeyEd25519" + value: + type: "string" + example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" + type: "object" + voting_power: + type: "string" + example: "8590153" + proposer_priority: + type: "string" + example: "-79515145" + type: "object" + type: "object" + triggered_timeout_precommit: + type: "boolean" + example: false + type: "object" + peers: + type: "array" + items: + type: "object" + properties: + node_address: + type: "string" + example: "357f6a6c1d27414579a8185060aa8adf9815c43c@68.183.41.207:26656" + peer_state: + required: + - "round_state" + - "stats" + properties: + round_state: + required: + - "height" + - "round" + - "step" + - "start_time" + - "proposal" + - "proposal_block_parts_header" + - "proposal_block_parts" + - "proposal_pol_round" + - "proposal_pol" + - "prevotes" + - "precommits" + - "last_commit_round" + - "last_commit" + - "catchup_commit_round" + - "catchup_commit" + properties: + height: + type: "string" + example: "1311801" + round: + type: "string" + example: "0" + step: + type: "number" + example: 3 + start_time: + type: "string" + example: "2019-08-05T11:28:49.21730864Z" + proposal: + type: "boolean" + example: false + proposal_block_parts_header: + required: + - "total" + - "hash" + properties: + total: + type: "string" + example: "0" + hash: + type: "string" + example: "" + type: "object" + proposal_pol_round: + x-nullable: true + type: "string" + example: "-1" + proposal_pol: + x-nullable: true + type: "string" + example: "____________________________________________________________________________________________________" + 
prevotes: + x-nullable: true + type: "string" + example: "___________________x________________________________________________________________________________" + precommits: + x-nullable: true + type: "string" + example: "____________________________________________________________________________________________________" + last_commit_round: + x-nullable: true + type: "string" + example: "0" + last_commit: + x-nullable: true + type: "string" + example: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + catchup_commit_round: + type: "string" + x-nullable: true + example: "-1" + catchup_commit: + x-nullable: true + type: "string" + example: "____________________________________________________________________________________________________" + type: "object" + stats: + required: + - "votes" + - "block_parts" + properties: + votes: + type: "string" + example: "1159558" + block_parts: + type: "string" + example: "4786" + type: "object" + type: "object" + type: "object" + ConsensusStateResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "round_state" + properties: + round_state: + required: + - "height/round/step" + - "start_time" + - "proposal_block_hash" + - "locked_block_hash" + - "valid_block_hash" + - "height_vote_set" + properties: + height/round/step: + type: "string" + example: "1262197/0/8" + start_time: + type: "string" + example: "2019-08-01T11:52:38.962730289Z" + proposal_block_hash: + type: "string" + example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" + locked_block_hash: + type: "string" + example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" + valid_block_hash: + type: "string" + example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" + height_vote_set: + type: "array" + items: + type: "object" + properties: + round: + type: "string" + example: "0" + prevotes: + type: "array" + items: + type: "string" + example: + - "Vote{0:000001E443FD 1262197/00/1(Prevote) 634ADAF1F402 7BB974E1BA40 @ 2019-08-01T11:52:35.513572509Z}" + - "nil-Vote" + prevotes_bit_array: + type: "string" + example: "BA{100:xxxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} 169753436/170151262 = 1.00" + precommits: + type: "array" + items: + type: "string" + example: + - "Vote{5:18C78D135C9D 1262197/00/2(Precommit) 634ADAF1F402 8B5EFFFEABCD @ 2019-08-01T11:52:36.25600005Z}" + - "nil-Vote" + precommits_bit_array: + type: "string" + example: "BA{100:xxxxxx_xxxxx_xxxx_x_xxx_xx_xx_xx__x_x_x__xxxxxxxxxxxxxx_xxxx_xx_xxxxxx_xxxxxxxx_xxxx_xxx_x_xxxx__xxx} 118726247/170151262 = 0.70" + type: "object" + type: "object" + ConsensusParamsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "block_height" + - "consensus_params" + properties: + block_height: + type: "string" + example: "1313448" + consensus_params: + required: + - "block" + - "evidence" + - "validator" + properties: + block: + required: + - "max_bytes" + - "max_gas" + - "time_iota_ms" + properties: + max_bytes: + type: "string" + example: "200000" + max_gas: + type: "string" + example: "2000000" + time_iota_ms: + type: "string" + example: "1000" + type: "object" + evidence: + required: + - "max_age" + properties: 
+ max_age: + type: "string" + example: "1000000" + type: "object" + validator: + required: + - "pub_key_types" + properties: + pub_key_types: + type: "array" + items: + type: "string" + example: + - "ed25519" + type: "object" + type: "object" + type: "object" + UnconfirmedTransactionsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "n_txs" + - "total" + - "total_bytes" + # - "txs" + properties: + n_txs: + type: "string" + example: "31" + total: + type: "string" + example: "82" + total_bytes: + type: "string" + example: "19974" + # txs: + # type: "array" + # x-nullable: true + # items: + # type: "string" + # x-nullable: true + # example: + # - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" + type: "object" + NumUnconfirmedTransactionsResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "n_txs" + - "total" + - "total_bytes" + - "txs" + properties: + n_txs: + type: "string" + example: "82" + total: + type: "string" + example: "82" + total_bytes: + type: "string" + example: "19974" + txs: + type: array + x-nullable: true + items: + type: string + x-nullable: true + example: + - null + - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" + type: "object" + TxSearchResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "txs" + - "total_count" + properties: + txs: + type: "array" + items: + type: "object" + properties: + hash: + type: "string" + example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + height: + type: "string" + example: "1000" + index: + type: "number" + example: 0 + tx_result: + required: + - "log" + - "gasWanted" + - "gasUsed" + - "tags" + properties: + log: + type: "string" + example: '[{"msg_index":"0","success":true,"log":""}]' + gasWanted: + type: "string" + example: "200000" + gasUsed: + type: "string" + example: "28596" + tags: + type: "array" + items: + type: "object" + properties: + key: + type: "string" + example: "YWN0aW9u" + value: + type: "string" + example: "c2VuZA==" + type: "object" + tx: + type: "string" + example: 
"5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + proof: + required: + - "RootHash" + - "Data" + - "Proof" + properties: + RootHash: + type: "string" + example: "72FE6BF6D4109105357AECE0A82E99D0F6288854D16D8767C5E72C57F876A14D" + Data: + type: "string" + example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + Proof: + required: + - "total" + - "index" + - "leaf_hash" + - "aunts" + properties: + total: + type: "string" + example: "2" + index: + type: "string" + example: "0" + leaf_hash: + type: "string" + example: "eoJxKCzF3m72Xiwb/Q43vJ37/2Sx8sfNS9JKJohlsYI=" + aunts: + type: "array" + items: + type: "string" + example: + - "eWb+HG/eMmukrQj4vNGyFYb3nKQncAWacq4HF5eFzDY=" + type: "object" + type: "object" + total_count: + type: "string" + example: "2" + type: "object" + TxResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "hash" + - "height" + - "index" + - "tx_result" + - "tx" + properties: + hash: + type: "string" + example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + height: + type: "string" + example: "1000" + index: + type: "number" + example: 0 + tx_result: + required: + - "log" + - "gasWanted" + - "gasUsed" + - "tags" + properties: + log: + type: "string" + example: '[{"msg_index":"0","success":true,"log":""}]' + gasWanted: + type: "string" + example: "200000" + gasUsed: + type: "string" + example: "28596" + tags: + type: "array" + items: + type: "object" + properties: + key: + type: "string" + example: "YWN0aW9u" + value: + type: "string" + example: "c2VuZA==" + type: "object" + tx: + type: "string" + example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + type: "object" + ABCIInfoResponse: + type: object + required: + - "jsonrpc" + - "id" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "response" + properties: + response: + required: + - "data" + - "app_version" + - "version" + properties: + data: + type: "string" + example: '{"size":0}' + version: + type: string + example: "0.16.1" + app_version: + type: "string" + example: "1314126" + type: "object" + type: "object" + ABCIQueryResponse: + type: object + required: + - "error" + - "result" + - "id" + - "jsonrpc" + properties: + error: + type: "string" + example: "" + result: + required: + - "response" + properties: + response: + required: + - "log" + - "height" + - "proof" + - "value" + - "key" + - "index" + - "code" + properties: + log: + type: "string" + example: "exists" + height: + type: "string" + example: "0" + proof: + type: "string" + example: 
"010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C" + value: + type: "string" + example: "61626364" + key: + type: "string" + example: "61626364" + index: + type: "string" + example: "-1" + code: + type: "string" + example: "0" + type: "object" + type: "object" + id: + type: "string" + example: "" + jsonrpc: + type: "string" + example: "2.0" + BroadcastEvidenceResponse: + type: object + required: + - "id" + - "jsonrpc" + properties: + error: + type: "string" + example: "" + result: + type: "string" + example: "" + id: + type: "string" + example: "" + jsonrpc: + type: "string" + example: "2.0" + BroadcastTxCommitResponse: + type: object + required: + - "error" + - "result" + - "id" + - "jsonrpc" + properties: + error: + type: "string" + example: "" + result: + required: + - "height" + - "hash" + - "deliver_tx" + - "check_tx" + properties: + height: + type: "string" + example: "26682" + hash: + type: "string" + example: "75CA0F856A4DA078FC4911580360E70CEFB2EBEE" + deliver_tx: + required: + - "log" + - "data" + - "code" + properties: + log: + type: "string" + example: "" + data: + type: "string" + example: "" + code: + type: "string" + example: "0" + type: "object" + check_tx: + required: + - "log" + - "data" + - "code" + properties: + log: + type: "string" + example: "" + data: + type: "string" + example: "" + code: + type: "string" + example: "0" + type: "object" + type: "object" + id: + type: "string" + example: "" + jsonrpc: + type: "string" + example: "2.0" + BroadcastTxResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + - "error" + properties: + jsonrpc: + type: "string" + example: "2.0" + id: + type: "string" + example: "" + result: + required: + - "code" + - "data" + - "log" + - "hash" + properties: + code: + type: "string" + example: "0" + data: + type: "string" + example: "" + log: + type: "string" + example: "" + hash: + type: "string" + example: "0D33F2F03A5234F38706E43004489E061AC40A2E" + type: "object" + error: + type: "string" + example: "" diff --git a/docs/spec/software/wal.md b/docs/spec/software/wal.md index 1f5d712c5..889ce4868 100644 --- a/docs/spec/software/wal.md +++ b/docs/spec/software/wal.md @@ -28,5 +28,5 @@ WAL. Then it will go to precommit, and that time it will work because the private validator contains the `LastSignBytes` and then we’ll replay the precommit from the WAL. -Make sure to read about [WAL corruption](../../../tendermint-core/running-in-production.md#wal-corruption) +Make sure to read about [WAL corruption](../../tendermint-core/running-in-production.md#wal-corruption) and recovery strategies. 
diff --git a/docs/tendermint-core-image.jpg b/docs/tendermint-core-image.jpg
new file mode 100755
index 000000000..75832e602
Binary files /dev/null and b/docs/tendermint-core-image.jpg differ
diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md
index c961ada4d..3fe985d89 100644
--- a/docs/tendermint-core/configuration.md
+++ b/docs/tendermint-core/configuration.md
@@ -30,8 +30,19 @@ moniker = "anonymous"
# and verifying their commits
fast_sync = true

-# Database backend: leveldb | memdb | cleveldb
-db_backend = "leveldb"
+# Database backend: goleveldb | cleveldb | boltdb
+# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+#   - pure go
+#   - stable
+# * cleveldb (uses levigo wrapper)
+#   - fast
+#   - requires gcc
+#   - use cleveldb build tag (go build -tags cleveldb)
+# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+#   - EXPERIMENTAL
+#   - may be faster in some use-cases (random reads - indexer)
+#   - use boltdb build tag (go build -tags boltdb)
+db_backend = "goleveldb"

# Database directory
db_dir = "data"
@@ -127,14 +138,22 @@ max_subscriptions_per_client = 5
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"

-# The name of a file containing certificate that is used to create the HTTPS server.
+# Maximum size of request body, in bytes
+max_body_bytes = {{ .RPC.MaxBodyBytes }}
+
+# Maximum size of request header, in bytes
+max_header_bytes = {{ .RPC.MaxHeaderBytes }}
+
+# The path to a file containing certificate that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_cert_file = ""

-# The name of a file containing matching private key that is used to create the HTTPS server.
+# The path to a file containing matching private key that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to tendermint's config directory.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_key_file = ""

@@ -221,6 +240,18 @@ max_txs_bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000

+# Maximum size of a single transaction.
+# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
+max_tx_bytes = 1048576
+
+##### fast sync configuration options #####
+[fastsync]
+
+# Fast Sync version to use:
+# 1) "v0" (default) - the legacy fast sync implementation
+# 2) "v1" - refactor of v0 version for better testability
+version = "v0"
+
##### consensus configuration options #####
[consensus]

@@ -302,8 +333,7 @@ namespace = "tendermint"
If `create_empty_blocks` is set to `true` in your config, blocks
will be created ~ every second (with default consensus parameters). You can regulate
-the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit
-= "10s"` should result in ~ 10 second blocks.
+the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks.
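The same knobs can also be set programmatically. A rough sketch (not part of the patch) against the `github.com/tendermint/tendermint/config` package, with field names assumed from this release:

```go
package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Start from the defaults that the generated config.toml above mirrors.
	c := cfg.DefaultConfig()

	// Database backend: goleveldb | cleveldb | boltdb (see the comments above).
	c.DBBackend = "goleveldb"

	// With empty blocks enabled, this yields ~10 second blocks.
	c.Consensus.TimeoutCommit = 10 * time.Second

	fmt.Println(c.DBBackend, c.Consensus.TimeoutCommit)
}
```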
**create_empty_blocks = false** @@ -329,7 +359,7 @@ Tendermint will only create blocks if there are transactions, or after waiting ## Consensus timeouts explained There's a variety of information about timeouts in [Running in -production](./running-in-production.html) +production](./running-in-production.md) You can also find more detailed technical explanation in the spec: [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938). diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md index 54c2c8a32..e852298b8 100644 --- a/docs/tendermint-core/how-to-read-logs.md +++ b/docs/tendermint-core/how-to-read-logs.md @@ -115,7 +115,7 @@ little overview what they do. - `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI client with respect to the application and maintains 3 connections: mempool, consensus and query. The code used by Tendermint Core can - be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client). + be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client). - `blockchain` Provides storage, pool (a group of peers), and reactor for both storing and exchanging blocks between peers. - `consensus` The heart of Tendermint core, which is the diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index ad6d4c765..94313ddbb 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -14,34 +14,34 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Since** | **Tags** | **Description** | -|-----------------------------------------|-----------|-----------|----------|-----------------------------------------------------------------| -| consensus\_height | Gauge | 0.21.0 | | Height of the chain | -| consensus\_validators | Gauge | 0.21.0 | | Number of validators | -| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators | -| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign | -| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators | -| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign | -| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators | -| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds | -| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds | -| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions | -| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer | -| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number | -| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) | -| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed | -| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes | -| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to | -| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id | number of bytes received from a given peer | -| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id | number of bytes sent to a given peer | -| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | 
number of pending bytes to be sent to a given peer | -| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id | -| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | amount of data pending to be sent to peer | -| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions | -| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes | -| mempool\_failed\_txs | counter | on dev | | number of failed transactions | -| mempool\_recheck\_times | counter | on dev | | number of transactions rechecked in the mempool | -| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms | +| **Name** | **Type** | **Since** | **Tags** | **Description** | +|-----------------------------------------|-----------|-----------|----------------|-----------------------------------------------------------------| +| consensus\_height | Gauge | 0.21.0 | | Height of the chain | +| consensus\_validators | Gauge | 0.21.0 | | Number of validators | +| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators | +| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign | +| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators | +| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign | +| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators | +| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds | +| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds | +| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions | +| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer | +| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number | +| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) | +| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed | +| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes | +| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to | +| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id, chID | number of bytes per channel received from a given peer | +| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id, chID | number of bytes per channel sent to a given peer | +| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | number of pending bytes to be sent to a given peer | +| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id | +| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | amount of data pending to be sent to peer | +| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions | +| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes | +| mempool\_failed\_txs | counter | on dev | | number of failed transactions | +| mempool\_recheck\_times | counter | on dev | | number of transactions rechecked in the mempool | +| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms | ## Useful queries diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md index 4ea5ab0d9..1b8e24426 100644 --- a/docs/tendermint-core/rpc.md +++ b/docs/tendermint-core/rpc.md @@ -4,4 +4,4 
@@ The RPC documentation is hosted here: - [https://tendermint.com/rpc/](https://tendermint.com/rpc/) -To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core). +To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/master/rpc/core). diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 1ec792831..9cb21fc54 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -8,7 +8,7 @@ key-value database. Unfortunately, this implementation of LevelDB seems to suffe install the real C-implementation of LevelDB and compile Tendermint to use that using `make build_c`. See the [install instructions](../introduction/install.md) for details. -Tendermint keeps multiple distinct LevelDB databases in the `$TMROOT/data`: +Tendermint keeps multiple distinct databases in the `$TMROOT/data`: - `blockstore.db`: Keeps the entire blockchain - stores blocks, block commits, and block meta data, each indexed by height. Used to sync new diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index 2ca8c9e92..abf382501 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -20,7 +20,7 @@ Initialize the root directory by running: tendermint init ``` -This will create a new private key (`priv_validator.json`), and a +This will create a new private key (`priv_validator_key.json`), and a genesis file (`genesis.json`) containing the associated public key, in `$TMHOME/config`. This is all that's necessary to run a local testnet with one validator. @@ -43,6 +43,11 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g - `chain_id`: ID of the blockchain. This must be unique for every blockchain. If your testnet blockchains do not have unique chain IDs, you will have a bad time. The ChainID must be less than 50 symbols. +- `consensus_params` + - `block` + - `time_iota_ms`: Minimum time increment between consecutive blocks (in + milliseconds). If the block header timestamp is ahead of the system clock, + decrease this value. - `validators`: List of initial validators. Note this may be overridden entirely by the application, and may be left empty to make explicit that the application will initialize the validator set with ResponseInitChain. @@ -63,9 +68,10 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g "genesis_time": "2018-11-13T18:11:50.277637Z", "chain_id": "test-chain-s4ui7D", "consensus_params": { - "block_size": { + "block": { "max_bytes": "22020096", - "max_gas": "-1" + "max_gas": "-1", + "time_iota_ms": "1000" }, "evidence": { "max_age": "100000" @@ -202,8 +208,10 @@ Note that raw hex cannot be used in `POST` transactions. ## Reset -**WARNING: UNSAFE** Only do this in development and only if you can +::: warning +**UNSAFE** Only do this in development and only if you can afford to lose all blockchain data! +::: To reset a blockchain, stop the node and run: @@ -306,7 +314,7 @@ write-ahead-log](../tendermint-core/running-in-production.md#mempool-wal) ## Tendermint Networks When `tendermint init` is run, both a `genesis.json` and -`priv_validator.json` are created in `~/.tendermint/config`. The +`priv_validator_key.json` are created in `~/.tendermint/config`. 
The `genesis.json` might look like: ``` @@ -327,7 +335,7 @@ When `tendermint init` is run, both a `genesis.json` and } ``` -And the `priv_validator.json`: +And the `priv_validator_key.json`: ``` { @@ -346,20 +354,20 @@ And the `priv_validator.json`: } ``` -The `priv_validator.json` actually contains a private key, and should +The `priv_validator_key.json` actually contains a private key, and should thus be kept absolutely secret; for now we work with the plain text. Note the `last_` fields, which are used to prevent us from signing conflicting messages. Note also that the `pub_key` (the public key) in the -`priv_validator.json` is also present in the `genesis.json`. +`priv_validator_key.json` is also present in the `genesis.json`. The genesis file contains the list of public keys which may participate in the consensus, and their corresponding voting power. Greater than 2/3 of the voting power must be active (i.e. the corresponding private keys must be producing signatures) for the consensus to make progress. In our case, the genesis file contains the public key of our -`priv_validator.json`, so a Tendermint node started with the default +`priv_validator_key.json`, so a Tendermint node started with the default root directory will be able to make progress. Voting power uses an int64 but must be positive, thus the range is: 0 through 9223372036854775807. Because of how the current proposer selection algorithm works, we do not @@ -445,16 +453,16 @@ not connected to the other peer. The easiest way to add new validators is to do it in the `genesis.json`, before starting the network. For instance, we could make a new -`priv_validator.json`, and copy it's `pub_key` into the above genesis. +`priv_validator_key.json`, and copy it's `pub_key` into the above genesis. -We can generate a new `priv_validator.json` with the command: +We can generate a new `priv_validator_key.json` with the command: ``` tendermint gen_validator ``` Now we can update our genesis file. For instance, if the new -`priv_validator.json` looks like: +`priv_validator_key.json` looks like: ``` { @@ -502,7 +510,7 @@ then the new `genesis.json` will be: ``` Update the `genesis.json` in `~/.tendermint/config`. Copy the genesis -file and the new `priv_validator.json` to the `~/.tendermint/config` on +file and the new `priv_validator_key.json` to the `~/.tendermint/config` on a new machine. Now run `tendermint node` on both machines, and use either diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md index 67a472e4b..a30ab54ab 100644 --- a/docs/tools/benchmarking.md +++ b/docs/tools/benchmarking.md @@ -75,6 +75,5 @@ Each of the connections is handled via two separate goroutines. ## Development ``` -make get_vendor_deps make test ``` diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index fa3901dde..26b90ed70 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -88,6 +88,5 @@ websocket. 
``` make get_tools -make get_vendor_deps make test ``` diff --git a/dredd.yml b/dredd.yml new file mode 100644 index 000000000..0db3d767d --- /dev/null +++ b/dredd.yml @@ -0,0 +1,33 @@ +color: true +dry-run: null +hookfiles: build/contract_tests +language: go +require: null +server: make localnet-start +server-wait: 30 +init: false +custom: {} +names: false +only: [] +reporter: [] +output: [] +header: [] +sorted: false +user: null +inline-errors: false +details: false +method: [GET] +loglevel: warning +path: [] +hooks-worker-timeout: 5000 +hooks-worker-connect-timeout: 1500 +hooks-worker-connect-retry: 500 +hooks-worker-after-connect-wait: 100 +hooks-worker-term-timeout: 5000 +hooks-worker-term-retry: 500 +hooks-worker-handler-host: 127.0.0.1 +hooks-worker-handler-port: 61321 +config: ./dredd.yml +# This path accepts no variables +blueprint: ./docs/spec/rpc/swagger.yaml +endpoint: 'http://127.0.0.1:26657/' diff --git a/evidence/wire.go b/evidence/codec.go similarity index 100% rename from evidence/wire.go rename to evidence/codec.go diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 30b20011e..13bc45563 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -60,7 +60,7 @@ func TestEvidencePool(t *testing.T) { pool := NewEvidencePool(stateDB, evidenceDB) goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr) - badEvidence := types.MockBadEvidence{goodEvidence} + badEvidence := types.MockBadEvidence{MockGoodEvidence: goodEvidence} // bad evidence err := pool.AddEvidence(badEvidence) diff --git a/evidence/reactor.go b/evidence/reactor.go index bbbab3e96..76ea270d9 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -60,11 +60,6 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) { go evR.broadcastEvidenceRoutine(peer) } -// RemovePeer implements Reactor. -func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - // nothing to do -} - // Receive implements Reactor. // It adds any received evidence to the evpool. 
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..f64d603f4 --- /dev/null +++ b/go.mod @@ -0,0 +1,51 @@ +module github.com/tendermint/tendermint + +go 1.12 + +require ( + github.com/VividCortex/gohistogram v1.0.0 // indirect + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect + github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d + github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a + github.com/etcd-io/bbolt v1.3.3 + github.com/fortytw2/leaktest v1.2.0 + github.com/go-kit/kit v0.6.0 + github.com/go-logfmt/logfmt v0.3.0 + github.com/go-stack/stack v1.8.0 // indirect + github.com/gogo/protobuf v1.2.1 + github.com/golang/protobuf v1.3.2 + github.com/golang/snappy v0.0.1 + github.com/google/gofuzz v1.0.0 // indirect + github.com/gorilla/websocket v1.2.0 + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmhodges/levigo v1.0.0 + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect + github.com/libp2p/go-buffer-pool v0.0.2 + github.com/magiconair/properties v1.8.0 + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v0.9.1 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect + github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 // indirect + github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect + github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 + github.com/rs/cors v1.6.0 + github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/cobra v0.0.1 + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.3 // indirect + github.com/spf13/viper v1.0.0 + github.com/stretchr/testify v1.3.0 + github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 + github.com/tendermint/go-amino v0.14.1 + go.etcd.io/bbolt v1.3.3 // indirect + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 + golang.org/x/net v0.0.0-20190909003024-a7b16738d86b + google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 // indirect + google.golang.org/grpc v1.23.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..2bae0430d --- /dev/null +++ b/go.sum @@ -0,0 +1,156 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw= +github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod 
h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE= +github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q= +github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-kit/kit v0.6.0 h1:wTifptAGIyIuir4bRyN4h7+kAa2a4eepLYVmRe5qqQ8= +github.com/go-kit/kit v0.6.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= 
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ= +github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= +github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk= +github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= +github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.1 h1:zZh3X5aZbdnoj+4XkaBxKfhO4ot82icYdhhREIAXIj8= +github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I= +github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= +github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= +github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= +golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 h1:67iHsV9djwGdZpdZNbLuQj6FOzCaZe3w+vhLjn5AcFA= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/libs/README.md b/libs/README.md index 9ea618dbd..b1bb0396c 100644 --- a/libs/README.md +++ b/libs/README.md @@ -13,7 +13,7 @@ 
CLI wraps the `cobra` and `viper` packages and handles some common elements of b ## clist -Clist provides a linekd list that is safe for concurrent access by many readers. +Clist provides a linked list that is safe for concurrent access by many readers. ## common diff --git a/libs/autofile/group.go b/libs/autofile/group.go index ce73466e4..7cc345478 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -472,7 +472,8 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { for { nn, err = gr.curReader.Read(p[n:]) n += nn - if err == io.EOF { + switch { + case err == io.EOF: if n >= lenP { return n, nil } @@ -480,9 +481,9 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { return n, err1 } - } else if err != nil { + case err != nil: return n, err - } else if nn == 0 { // empty file + case nn == 0: // empty file return n, err } } diff --git a/libs/circle.yml b/libs/circle.yml deleted file mode 100644 index 390ffb039..000000000 --- a/libs/circle.yml +++ /dev/null @@ -1,21 +0,0 @@ -machine: - environment: - GOPATH: "${HOME}/.go_workspace" - PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" - PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - hosts: - localhost: 127.0.0.1 - -dependencies: - override: - - mkdir -p "$PROJECT_PARENT_PATH" - - ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH" - post: - - go version - -test: - override: - - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh - post: - - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 878cf26e5..6bf23750c 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -14,7 +14,7 @@ import ( func WriteConfigVals(dir string, vals map[string]string) error { data := "" for k, v := range vals { - data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) + data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") return ioutil.WriteFile(cfile, []byte(data), 0666) diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index 13aca3577..1784f8218 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -261,6 +261,8 @@ func TestWaitChan(t *testing.T) { pushed++ time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond) } + // apply a deterministic pause so the counter has time to catch up + time.Sleep(25 * time.Millisecond) close(done) }() @@ -273,7 +275,7 @@ FOR_LOOP: next = next.Next() seen++ if next == nil { - continue + t.Fatal("Next should not be nil when waiting on NextWaitChan") } case <-done: break FOR_LOOP diff --git a/libs/common/async.go b/libs/common/async.go index e3293ab4c..326b97248 100644 --- a/libs/common/async.go +++ b/libs/common/async.go @@ -61,9 +61,10 @@ func (trs *TaskResultSet) Reap() *TaskResultSet { TaskResult: result, OK: true, } - } else { - // We already wrote it. } + // else { + // We already wrote it. + // } default: // Do nothing. } @@ -83,9 +84,10 @@ func (trs *TaskResultSet) Wait() *TaskResultSet { TaskResult: result, OK: true, } - } else { - // We already wrote it. } + // else { + // We already wrote it. 
+ // } } return trs } diff --git a/libs/common/async_test.go b/libs/common/async_test.go index f565b4bd3..9ac5ffe3f 100644 --- a/libs/common/async_test.go +++ b/libs/common/async_test.go @@ -31,18 +31,20 @@ func TestParallel(t *testing.T) { var failedTasks int for i := 0; i < len(tasks); i++ { taskResult, ok := trs.LatestResult(i) - if !ok { + switch { + case !ok: assert.Fail(t, "Task #%v did not complete.", i) failedTasks++ - } else if taskResult.Error != nil { + case taskResult.Error != nil: assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) failedTasks++ - } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { + case !assert.Equal(t, -1*i, taskResult.Value.(int)): assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) failedTasks++ - } else { - // Good! } + // else { + // Good! + // } } assert.Equal(t, failedTasks, 0, "No task should have failed") assert.Nil(t, trs.FirstError(), "There should be no errors") @@ -132,11 +134,12 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val inte taskName := fmt.Sprintf("Task #%v", index) assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) assert.Equal(t, val, taskResult.Value, taskName) - if err != nil { + switch { + case err != nil: assert.Equal(t, err, taskResult.Error, taskName) - } else if pnk != nil { + case pnk != nil: assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName) - } else { + default: assert.Nil(t, taskResult.Error, taskName) } } diff --git a/libs/common/date.go b/libs/common/date.go deleted file mode 100644 index e017a4b41..000000000 --- a/libs/common/date.go +++ /dev/null @@ -1,43 +0,0 @@ -package common - -import ( - "strings" - "time" - - "github.com/pkg/errors" -) - -// TimeLayout helps to parse a date string of the format YYYY-MM-DD -// Intended to be used with the following function: -// time.Parse(TimeLayout, date) -var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD - -// ParseDateRange parses a date range string of the format start:end -// where the start and end date are of the format YYYY-MM-DD. 
-// The parsed dates are time.Time and will return the zero time for -// unbounded dates, ex: -// unbounded start: :2000-12-31 -// unbounded end: 2000-12-31: -func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) { - dates := strings.Split(dateRange, ":") - if len(dates) != 2 { - err = errors.New("bad date range, must be in format date:date") - return - } - parseDate := func(date string) (out time.Time, err error) { - if len(date) == 0 { - return - } - out, err = time.Parse(TimeLayout, date) - return - } - startDate, err = parseDate(dates[0]) - if err != nil { - return - } - endDate, err = parseDate(dates[1]) - if err != nil { - return - } - return -} diff --git a/libs/common/date_test.go b/libs/common/date_test.go deleted file mode 100644 index 2c0632477..000000000 --- a/libs/common/date_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package common - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC) - date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) - zero time.Time -) - -func TestParseDateRange(t *testing.T) { - assert := assert.New(t) - - var testDates = []struct { - dateStr string - start time.Time - end time.Time - errNil bool - }{ - {"2015-12-31:2016-12-31", date, date2, true}, - {"2015-12-31:", date, zero, true}, - {":2016-12-31", zero, date2, true}, - {"2016-12-31", zero, zero, false}, - {"2016-31-12:", zero, zero, false}, - {":2016-31-12", zero, zero, false}, - } - - for _, test := range testDates { - start, end, err := ParseDateRange(test.dateStr) - if test.errNil { - assert.Nil(err) - testPtr := func(want, have time.Time) { - assert.True(have.Equal(want)) - } - testPtr(test.start, start) - testPtr(test.end, end) - } else { - assert.NotNil(err) - } - } -} diff --git a/libs/common/errors.go b/libs/common/errors.go index 10e40ebd2..aacfbe274 100644 --- a/libs/common/errors.go +++ b/libs/common/errors.go @@ -9,7 +9,7 @@ import ( // Convenience method. func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { - if causeCmnError, ok := cause.(*cmnError); ok { + if causeCmnError, ok := cause.(*cmnError); ok { //nolint:gocritic msg := fmt.Sprintf(format, args...) return causeCmnError.Stacktrace().Trace(1, msg) } else if cause == nil { @@ -212,35 +212,3 @@ func (fe FmtError) String() string { func (fe FmtError) Format() string { return fe.format } - -//---------------------------------------- -// Panic wrappers -// XXX DEPRECATED - -// A panic resulting from a sanity check means there is a programmer error -// and some guarantee is not satisfied. -// XXX DEPRECATED -func PanicSanity(v interface{}) { - panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v)) -} - -// A panic here means something has gone horribly wrong, in the form of data corruption or -// failure of the operating system. In a correct/healthy system, these should never fire. -// If they do, it's indicative of a much more serious problem. -// XXX DEPRECATED -func PanicCrisis(v interface{}) { - panic(fmt.Sprintf("Panicked on a Crisis: %v", v)) -} - -// Indicates a failure of consensus. Someone was malicious or something has -// gone horribly wrong. 
These should really boot us into an "emergency-recover" mode -// XXX DEPRECATED -func PanicConsensus(v interface{}) { - panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v)) -} - -// For those times when we're not sure if we should panic -// XXX DEPRECATED -func PanicQ(v interface{}) { - panic(fmt.Sprintf("Panicked questionably: %v", v)) -} diff --git a/libs/common/heap.go b/libs/common/heap.go deleted file mode 100644 index b3bcb9db8..000000000 --- a/libs/common/heap.go +++ /dev/null @@ -1,125 +0,0 @@ -package common - -import ( - "bytes" - "container/heap" -) - -/* - Example usage: - - ``` - h := NewHeap() - - h.Push("msg1", 1) - h.Push("msg3", 3) - h.Push("msg2", 2) - - fmt.Println(h.Pop()) // msg1 - fmt.Println(h.Pop()) // msg2 - fmt.Println(h.Pop()) // msg3 - ``` -*/ -type Heap struct { - pq priorityQueue -} - -func NewHeap() *Heap { - return &Heap{pq: make([]*pqItem, 0)} -} - -func (h *Heap) Len() int64 { - return int64(len(h.pq)) -} - -func (h *Heap) Push(value interface{}, priority int) { - heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) -} - -func (h *Heap) PushBytes(value interface{}, priority []byte) { - heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) -} - -func (h *Heap) PushComparable(value interface{}, priority Comparable) { - heap.Push(&h.pq, &pqItem{value: value, priority: priority}) -} - -func (h *Heap) Peek() interface{} { - if len(h.pq) == 0 { - return nil - } - return h.pq[0].value -} - -func (h *Heap) Update(value interface{}, priority Comparable) { - h.pq.Update(h.pq[0], value, priority) -} - -func (h *Heap) Pop() interface{} { - item := heap.Pop(&h.pq).(*pqItem) - return item.value -} - -//----------------------------------------------------------------------------- -// From: http://golang.org/pkg/container/heap/#example__priorityQueue - -type pqItem struct { - value interface{} - priority Comparable - index int -} - -type priorityQueue []*pqItem - -func (pq priorityQueue) Len() int { return len(pq) } - -func (pq priorityQueue) Less(i, j int) bool { - return pq[i].priority.Less(pq[j].priority) -} - -func (pq priorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *priorityQueue) Push(x interface{}) { - n := len(*pq) - item := x.(*pqItem) - item.index = n - *pq = append(*pq, item) -} - -func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - item.index = -1 // for safety - *pq = old[0 : n-1] - return item -} - -func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { - item.value = value - item.priority = priority - heap.Fix(pq, item.index) -} - -//-------------------------------------------------------------------------------- -// Comparable - -type Comparable interface { - Less(o interface{}) bool -} - -type cmpInt int - -func (i cmpInt) Less(o interface{}) bool { - return int(i) < int(o.(cmpInt)) -} - -type cmpBytes []byte - -func (bz cmpBytes) Less(o interface{}) bool { - return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 -} diff --git a/libs/common/io.go b/libs/common/io.go deleted file mode 100644 index fa0443e09..000000000 --- a/libs/common/io.go +++ /dev/null @@ -1,74 +0,0 @@ -package common - -import ( - "bytes" - "errors" - "io" -) - -type PrefixedReader struct { - Prefix []byte - reader io.Reader -} - -func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader { - return &PrefixedReader{prefix, reader} -} - -func (pr *PrefixedReader) Read(p []byte) (n int, err 
error) { - if len(pr.Prefix) > 0 { - read := copy(p, pr.Prefix) - pr.Prefix = pr.Prefix[read:] - return read, nil - } - return pr.reader.Read(p) -} - -// NOTE: Not goroutine safe -type BufferCloser struct { - bytes.Buffer - Closed bool -} - -func NewBufferCloser(buf []byte) *BufferCloser { - return &BufferCloser{ - *bytes.NewBuffer(buf), - false, - } -} - -func (bc *BufferCloser) Close() error { - if bc.Closed { - return errors.New("BufferCloser already closed") - } - bc.Closed = true - return nil -} - -func (bc *BufferCloser) Write(p []byte) (n int, err error) { - if bc.Closed { - return 0, errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.Write(p) -} - -func (bc *BufferCloser) WriteByte(c byte) error { - if bc.Closed { - return errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.WriteByte(c) -} - -func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { - if bc.Closed { - return 0, errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.WriteRune(r) -} - -func (bc *BufferCloser) WriteString(s string) (n int, err error) { - if bc.Closed { - return 0, errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.WriteString(s) -} diff --git a/libs/common/os.go b/libs/common/os.go index 7c3fad7ee..0e35524cf 100644 --- a/libs/common/os.go +++ b/libs/common/os.go @@ -1,39 +1,13 @@ package common import ( - "bufio" "fmt" - "io" "io/ioutil" "os" - "os/exec" "os/signal" - "strings" "syscall" ) -var gopath string - -// GoPath returns GOPATH env variable value. If it is not set, this function -// will try to call `go env GOPATH` subcommand. -func GoPath() string { - if gopath != "" { - return gopath - } - - path := os.Getenv("GOPATH") - if len(path) == 0 { - goCmd := exec.Command("go", "env", "GOPATH") - out, err := goCmd.Output() - if err != nil { - panic(fmt.Sprintf("failed to determine gopath: %v", err)) - } - path = string(out) - } - gopath = path - return path -} - type logger interface { Info(msg string, keyvals ...interface{}) } @@ -78,25 +52,6 @@ func EnsureDir(dir string, mode os.FileMode) error { return nil } -func IsDirEmpty(name string) (bool, error) { - f, err := os.Open(name) - if err != nil { - if os.IsNotExist(err) { - return true, err - } - // Otherwise perhaps a permission - // error or some other error. 
- return false, err - } - defer f.Close() - - _, err = f.Readdirnames(1) // Or f.Readdir(1) - if err == io.EOF { - return true, nil - } - return false, err // Either not empty or error, suits both cases -} - func FileExists(filePath string) bool { _, err := os.Stat(filePath) return !os.IsNotExist(err) @@ -125,19 +80,3 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) } } - -//-------------------------------------------------------------------------------- - -func Prompt(prompt string, defaultValue string) (string, error) { - fmt.Print(prompt) - reader := bufio.NewReader(os.Stdin) - line, err := reader.ReadString('\n') - if err != nil { - return defaultValue, err - } - line = strings.TrimSpace(line) - if line == "" { - return defaultValue, nil - } - return line, nil -} diff --git a/libs/common/os_test.go b/libs/common/os_test.go deleted file mode 100644 index e8a23ebd6..000000000 --- a/libs/common/os_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package common - -import ( - "os" - "testing" -) - -func TestOSGoPath(t *testing.T) { - // restore original gopath upon exit - path := os.Getenv("GOPATH") - defer func() { - _ = os.Setenv("GOPATH", path) - }() - - err := os.Setenv("GOPATH", "~/testgopath") - if err != nil { - t.Fatal(err) - } - path = GoPath() - if path != "~/testgopath" { - t.Fatalf("should get GOPATH env var value, got %v", path) - } - os.Unsetenv("GOPATH") - - path = GoPath() - if path != "~/testgopath" { - t.Fatalf("subsequent calls should return the same value, got %v", path) - } -} - -func TestOSGoPathWithoutEnvVar(t *testing.T) { - // restore original gopath upon exit - path := os.Getenv("GOPATH") - defer func() { - _ = os.Setenv("GOPATH", path) - }() - - os.Unsetenv("GOPATH") - // reset cache - gopath = "" - - path = GoPath() - if path == "" || path == "~/testgopath" { - t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path) - } -} diff --git a/libs/common/random.go b/libs/common/random.go index 2de65945c..47e44d1c0 100644 --- a/libs/common/random.go +++ b/libs/common/random.go @@ -300,7 +300,7 @@ func cRandBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) if err != nil { - PanicCrisis(err) + panic(err) } return b } diff --git a/libs/common/random_test.go b/libs/common/random_test.go index c59a577b8..74dcc04b4 100644 --- a/libs/common/random_test.go +++ b/libs/common/random_test.go @@ -45,11 +45,9 @@ func TestDeterminism(t *testing.T) { output := testThemAll() if i == 0 { firstOutput = output - } else { - if firstOutput != output { - t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", - i, firstOutput, output) - } + } else if firstOutput != output { + t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) } } } diff --git a/libs/common/service.go b/libs/common/service.go index 21fb0df3e..8eee48138 100644 --- a/libs/common/service.go +++ b/libs/common/service.go @@ -194,8 +194,7 @@ func (bs *BaseService) Reset() error { // OnReset implements Service by panicking. 
 func (bs *BaseService) OnReset() error {
-	PanicSanity("The service cannot be reset")
-	return nil
+	panic("The service cannot be reset")
 }
 
 // IsRunning implements Service by returning true or false depending on the
diff --git a/libs/common/string.go b/libs/common/string.go
index ddf350b10..4f8a8f20d 100644
--- a/libs/common/string.go
+++ b/libs/common/string.go
@@ -51,11 +51,12 @@ func IsASCIIText(s string) bool {
 func ASCIITrim(s string) string {
 	r := make([]byte, 0, len(s))
 	for _, b := range []byte(s) {
-		if b == 32 {
+		switch {
+		case b == 32:
 			continue // skip space
-		} else if 32 < b && b <= 126 {
+		case 32 < b && b <= 126:
 			r = append(r, b)
-		} else {
+		default:
 			panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b))
 		}
 	}
diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go
index 6242f50de..cfee77c26 100644
--- a/libs/db/backend_test.go
+++ b/libs/db/backend_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
 	cmn "github.com/tendermint/tendermint/libs/common"
 )
diff --git a/libs/db/boltdb.go b/libs/db/boltdb.go
new file mode 100644
index 000000000..0e38a417e
--- /dev/null
+++ b/libs/db/boltdb.go
@@ -0,0 +1,349 @@
+// +build boltdb
+
+package db
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/etcd-io/bbolt"
+)
+
+var bucket = []byte("tm")
+
+func init() {
+	registerDBCreator(BoltDBBackend, func(name, dir string, opt interface{}) (DB, error) {
+		return NewBoltDB(name, dir)
+	}, false)
+}
+
+// BoltDB is a wrapper around etcd's fork of bolt
+// (https://github.com/etcd-io/bbolt).
+//
+// NOTE: All operations (including Set, Delete) are synchronous by default. One
+// can globally turn this off by using the NoSync config option (not recommended).
+//
+// A single bucket ([]byte("tm")) is used per database instance. This could
+// lead to performance issues when/if there are many keys.
+type BoltDB struct {
+	db *bbolt.DB
+}
+
+// NewBoltDB returns a BoltDB with default options.
+func NewBoltDB(name, dir string) (DB, error) {
+	return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
+}
+
+// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
+// supported because NewBoltDBWithOpts creates a global bucket.
+func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
+	if opts.ReadOnly {
+		return nil, errors.New("ReadOnly: true is not supported")
+	}
+
+	dbPath := filepath.Join(dir, name+".db")
+	db, err := bbolt.Open(dbPath, os.ModePerm, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// create a global bucket
+	err = db.Update(func(tx *bbolt.Tx) error {
+		_, err := tx.CreateBucketIfNotExists(bucket)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &BoltDB{db: db}, nil
+}
+
+func (bdb *BoltDB) Get(key []byte) (value []byte) {
+	key = nonEmptyKey(nonNilBytes(key))
+	err := bdb.db.View(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(bucket)
+		if v := b.Get(key); v != nil {
+			value = append([]byte{}, v...)
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (bdb *BoltDB) Has(key []byte) bool {
+	return bdb.Get(key) != nil
+}
+
+func (bdb *BoltDB) Set(key, value []byte) {
+	key = nonEmptyKey(nonNilBytes(key))
+	value = nonNilBytes(value)
+	err := bdb.db.Update(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(bucket)
+		return b.Put(key, value)
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *BoltDB) SetSync(key, value []byte) {
+	bdb.Set(key, value)
+}
+
+func (bdb *BoltDB) Delete(key []byte) {
+	key = nonEmptyKey(nonNilBytes(key))
+	err := bdb.db.Update(func(tx *bbolt.Tx) error {
+		return tx.Bucket(bucket).Delete(key)
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *BoltDB) DeleteSync(key []byte) {
+	bdb.Delete(key)
+}
+
+func (bdb *BoltDB) Close() {
+	bdb.db.Close()
+}
+
+func (bdb *BoltDB) Print() {
+	stats := bdb.db.Stats()
+	fmt.Printf("%v\n", stats)
+
+	err := bdb.db.View(func(tx *bbolt.Tx) error {
+		tx.Bucket(bucket).ForEach(func(k, v []byte) error {
+			fmt.Printf("[%X]:\t[%X]\n", k, v)
+			return nil
+		})
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *BoltDB) Stats() map[string]string {
+	stats := bdb.db.Stats()
+	m := make(map[string]string)
+
+	// Freelist stats
+	m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
+	m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
+	m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
+	m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
+
+	// Transaction stats
+	m["TxN"] = fmt.Sprintf("%v", stats.TxN)
+	m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
+
+	return m
+}
+
+// boltDBBatch buffers operations in a slice and dumps them to the underlying
+// DB upon Write call.
+type boltDBBatch struct {
+	db  *BoltDB
+	ops []operation
+}
+
+// NewBatch returns a new batch.
+func (bdb *BoltDB) NewBatch() Batch {
+	return &boltDBBatch{
+		ops: nil,
+		db:  bdb,
+	}
+}
+
+// It is safe to modify the contents of the argument after Set returns but not
+// before.
+func (bdb *boltDBBatch) Set(key, value []byte) {
+	bdb.ops = append(bdb.ops, operation{opTypeSet, key, value})
+}
+
+// It is safe to modify the contents of the argument after Delete returns but
+// not before.
+func (bdb *boltDBBatch) Delete(key []byte) {
+	bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil})
+}
+
+// NOTE: the operation is synchronous (see BoltDB for reasons)
+func (bdb *boltDBBatch) Write() {
+	err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(bucket)
+		for _, op := range bdb.ops {
+			key := nonEmptyKey(nonNilBytes(op.key))
+			switch op.opType {
+			case opTypeSet:
+				if putErr := b.Put(key, op.value); putErr != nil {
+					return putErr
+				}
+			case opTypeDelete:
+				if delErr := b.Delete(key); delErr != nil {
+					return delErr
+				}
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *boltDBBatch) WriteSync() {
+	bdb.Write()
+}
+
+func (bdb *boltDBBatch) Close() {}
+
+// WARNING: Any concurrent writes or reads will block until the iterator is
+// closed.
+func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
+	tx, err := bdb.db.Begin(false)
+	if err != nil {
+		panic(err)
+	}
+	return newBoltDBIterator(tx, start, end, false)
+}
+
+// WARNING: Any concurrent writes or reads will block until the iterator is
+// closed.
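+//
+// A minimal usage sketch (key handling is illustrative):
+//
+//	itr := bdb.ReverseIterator(nil, nil) // nil, nil == full scan, largest key first
+//	defer itr.Close()
+//	for ; itr.Valid(); itr.Next() {
+//		k, v := itr.Key(), itr.Value()
+//		// use k and v
+//	}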
+func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
+	tx, err := bdb.db.Begin(false)
+	if err != nil {
+		panic(err)
+	}
+	return newBoltDBIterator(tx, start, end, true)
+}
+
+// boltDBIterator allows you to iterate over a range of keys/values given some
+// start / end keys (nil & nil will result in doing a full scan).
+type boltDBIterator struct {
+	tx *bbolt.Tx
+
+	itr   *bbolt.Cursor
+	start []byte
+	end   []byte
+
+	currentKey   []byte
+	currentValue []byte
+
+	isInvalid bool
+	isReverse bool
+}
+
+func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
+	itr := tx.Bucket(bucket).Cursor()
+
+	var ck, cv []byte
+	if isReverse {
+		if end == nil {
+			ck, cv = itr.Last()
+		} else {
+			_, _ = itr.Seek(end) // after key
+			ck, cv = itr.Prev()  // return to end key
+		}
+	} else {
+		if start == nil {
+			ck, cv = itr.First()
+		} else {
+			ck, cv = itr.Seek(start)
+		}
+	}
+
+	return &boltDBIterator{
+		tx:           tx,
+		itr:          itr,
+		start:        start,
+		end:          end,
+		currentKey:   ck,
+		currentValue: cv,
+		isReverse:    isReverse,
+		isInvalid:    false,
+	}
+}
+
+func (itr *boltDBIterator) Domain() ([]byte, []byte) {
+	return itr.start, itr.end
+}
+
+func (itr *boltDBIterator) Valid() bool {
+	if itr.isInvalid {
+		return false
+	}
+
+	// iterated to the end of the cursor
+	if len(itr.currentKey) == 0 {
+		itr.isInvalid = true
+		return false
+	}
+
+	if itr.isReverse {
+		if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
+			itr.isInvalid = true
+			return false
+		}
+	} else {
+		if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
+	}
+
+	// Valid
+	return true
+}
+
+func (itr *boltDBIterator) Next() {
+	itr.assertIsValid()
+	if itr.isReverse {
+		itr.currentKey, itr.currentValue = itr.itr.Prev()
+	} else {
+		itr.currentKey, itr.currentValue = itr.itr.Next()
+	}
+}
+
+func (itr *boltDBIterator) Key() []byte {
+	itr.assertIsValid()
+	return append([]byte{}, itr.currentKey...)
+}
+
+func (itr *boltDBIterator) Value() []byte {
+	itr.assertIsValid()
+	var value []byte
+	if itr.currentValue != nil {
+		value = append([]byte{}, itr.currentValue...)
+	}
+	return value
+}
+
+func (itr *boltDBIterator) Close() {
+	err := itr.tx.Rollback()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (itr *boltDBIterator) assertIsValid() {
+	if !itr.Valid() {
+		panic("Boltdb-iterator is invalid")
+	}
+}
+
+// nonEmptyKey returns []byte("nil") if key is empty.
+// WARNING: this may collide with a "nil" user key!
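+//
+// For example:
+//
+//	nonEmptyKey(nil)          // -> []byte("nil")
+//	nonEmptyKey([]byte{})     // -> []byte("nil")
+//	nonEmptyKey([]byte("a"))  // -> []byte("a")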
+func nonEmptyKey(key []byte) []byte { + if len(key) == 0 { + return []byte("nil") + } + return key +} diff --git a/libs/db/boltdb_test.go b/libs/db/boltdb_test.go new file mode 100644 index 000000000..416a8fd03 --- /dev/null +++ b/libs/db/boltdb_test.go @@ -0,0 +1,37 @@ +// +build boltdb + +package db + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +func TestBoltDBNewBoltDB(t *testing.T) { + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + dir := os.TempDir() + defer cleanupDBDir(dir, name) + + db, err := NewBoltDB(name, dir) + require.NoError(t, err) + db.Close() +} + +func BenchmarkBoltDBRandomReadsWrites(b *testing.B) { + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + db, err := NewBoltDB(name, "") + if err != nil { + b.Fatal(err) + } + defer func() { + db.Close() + cleanupDBDir("", name) + }() + + benchmarkRandomReadsWrites(b, db) +} diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go index 77cd5409a..952b3f767 100644 --- a/libs/db/c_level_db.go +++ b/libs/db/c_level_db.go @@ -1,4 +1,4 @@ -// +build gcc +// +build cleveldb package db @@ -14,7 +14,6 @@ func init() { dbCreator := func(name string, dir string, opt interface{}) (DB, error) { return NewCLevelDB(name, dir) } - registerDBCreator(LevelDBBackend, dbCreator, true) registerDBCreator(CLevelDBBackend, dbCreator, false) } diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go index e71dee0c1..1c10fcdef 100644 --- a/libs/db/c_level_db_test.go +++ b/libs/db/c_level_db_test.go @@ -1,4 +1,4 @@ -// +build gcc +// +build cleveldb package db @@ -93,7 +93,7 @@ func TestCLevelDBBackend(t *testing.T) { // Can't use "" (current directory) or "./" here because levigo.Open returns: // "Error initializing DB: IO error: test_XXX.db: Invalid argument" dir := os.TempDir() - db := NewDB(name, LevelDBBackend, dir) + db := NewDB(name, CLevelDBBackend, dir) defer cleanupDBDir(dir, name) _, ok := db.(*CLevelDB) @@ -103,7 +103,7 @@ func TestCLevelDBBackend(t *testing.T) { func TestCLevelDBStats(t *testing.T) { name := fmt.Sprintf("test_%x", cmn.RandStr(12)) dir := os.TempDir() - db := NewDB(name, LevelDBBackend, dir) + db := NewDB(name, CLevelDBBackend, dir) defer cleanupDBDir(dir, name) assert.NotEmpty(t, db.Stats()) diff --git a/libs/db/common_test.go b/libs/db/common_test.go index 1e27a7cac..64a86979c 100644 --- a/libs/db/common_test.go +++ b/libs/db/common_test.go @@ -1,6 +1,8 @@ package db import ( + "bytes" + "encoding/binary" "fmt" "io/ioutil" "sync" @@ -8,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" ) //---------------------------------------- @@ -188,3 +191,66 @@ func (mockIterator) Value() []byte { func (mockIterator) Close() { } + +func benchmarkRandomReadsWrites(b *testing.B, db DB) { + b.StopTimer() + + // create dummy data + const numItems = int64(1000000) + internal := map[int64]int64{} + for i := 0; i < int(numItems); i++ { + internal[int64(i)] = int64(0) + } + + // fmt.Println("ok, starting") + b.StartTimer() + + for i := 0; i < b.N; i++ { + // Write something + { + idx := int64(cmn.RandInt()) % numItems + internal[idx]++ + val := internal[idx] + idxBytes := int642Bytes(int64(idx)) + valBytes := int642Bytes(int64(val)) + //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) + db.Set(idxBytes, valBytes) + } + + // Read something + { + idx := int64(cmn.RandInt()) % numItems + valExp := internal[idx] + idxBytes := 
int642Bytes(int64(idx))
+			valBytes := db.Get(idxBytes)
+			//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
+			if valExp == 0 {
+				if !bytes.Equal(valBytes, nil) {
+					b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
+					break
+				}
+			} else {
+				if len(valBytes) != 8 {
+					b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
+					break
+				}
+				valGot := bytes2Int64(valBytes)
+				if valExp != valGot {
+					b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
+					break
+				}
+			}
+		}
+
+	}
+}
+
+func int642Bytes(i int64) []byte {
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, uint64(i))
+	return buf
+}
+
+func bytes2Int64(buf []byte) int64 {
+	return int64(binary.BigEndian.Uint64(buf))
+}
diff --git a/libs/db/db.go b/libs/db/db.go
index 33eff1074..44adc630e 100644
--- a/libs/db/db.go
+++ b/libs/db/db.go
@@ -5,17 +5,38 @@ import (
 	"strings"
 )
 
-//----------------------------------------
-// Main entry
-
 type DBBackendType string
 
+// These are valid backend types.
 const (
-	LevelDBBackend   DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc
-	CLevelDBBackend  DBBackendType = "cleveldb"
+	// LevelDBBackend is a legacy type. Defaults to goleveldb unless the cleveldb
+	// build tag was used, in which case it becomes cleveldb.
+	// Deprecated: Use concrete types (goleveldb, cleveldb, etc.)
+	LevelDBBackend DBBackendType = "leveldb"
+
+	// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
+	// popular implementation)
+	// - pure go
+	// - stable
 	GoLevelDBBackend DBBackendType = "goleveldb"
-	MemDBBackend     DBBackendType = "memdb"
-	FSDBBackend      DBBackendType = "fsdb" // using the filesystem naively
+	// CLevelDBBackend represents cleveldb (uses levigo wrapper)
+	// - fast
+	// - requires gcc
+	// - use cleveldb build tag (go build -tags cleveldb)
+	CLevelDBBackend DBBackendType = "cleveldb"
+	// MemDBBackend represents an in-memory key value store, which is mostly used
+	// for testing.
+	MemDBBackend DBBackendType = "memdb"
+	// FSDBBackend represents a filesystem database
+	// - EXPERIMENTAL
+	// - slow
+	FSDBBackend DBBackendType = "fsdb"
+	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
+	// github.com/etcd-io/bbolt)
+	// - EXPERIMENTAL
+	// - may be faster in some use-cases (random reads - indexer)
+	// - use boltdb build tag (go build -tags boltdb)
+	BoltDBBackend DBBackendType = "boltdb"
 )
 
 type dbCreator func(name string, dir string, opt interface{}) (DB, error)
diff --git a/libs/db/db_test.go b/libs/db/db_test.go
index 7cb721b26..22b781f95 100644
--- a/libs/db/db_test.go
+++ b/libs/db/db_test.go
@@ -182,8 +182,7 @@ func TestDBBatchWrite(t *testing.T) {
 
 	for i, tc := range testCases {
 		mdb := newMockDB()
-		ddb := NewDebugDB(t.Name(), mdb)
-		batch := ddb.NewBatch()
+		batch := mdb.NewBatch()
 
 		tc.modify(batch)
 
diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go
deleted file mode 100644
index 658cd0555..000000000
--- a/libs/db/debug_db.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package db
-
-import (
-	"fmt"
-	"sync"
-
-	cmn "github.com/tendermint/tendermint/libs/common"
-)
-
-//----------------------------------------
-// debugDB
-
-type debugDB struct {
-	label string
-	db    DB
-}
-
-// For printing all operationgs to the console for debugging.
-func NewDebugDB(label string, db DB) debugDB {
-	return debugDB{
-		label: label,
-		db:    db,
-	}
-}
-
-// Implements atomicSetDeleter.
-func (ddb debugDB) Mutex() *sync.Mutex { return nil }
-
-// Implements DB.
-func (ddb debugDB) Get(key []byte) (value []byte) { - defer func() { - fmt.Printf("%v.Get(%v) %v\n", ddb.label, - cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - }() - value = ddb.db.Get(key) - return -} - -// Implements DB. -func (ddb debugDB) Has(key []byte) (has bool) { - defer func() { - fmt.Printf("%v.Has(%v) %v\n", ddb.label, - cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue), has) - }() - return ddb.db.Has(key) -} - -// Implements DB. -func (ddb debugDB) Set(key []byte, value []byte) { - fmt.Printf("%v.Set(%v, %v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - ddb.db.Set(key, value) -} - -// Implements DB. -func (ddb debugDB) SetSync(key []byte, value []byte) { - fmt.Printf("%v.SetSync(%v, %v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - ddb.db.SetSync(key, value) -} - -// Implements atomicSetDeleter. -func (ddb debugDB) SetNoLock(key []byte, value []byte) { - fmt.Printf("%v.SetNoLock(%v, %v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - ddb.db.(atomicSetDeleter).SetNoLock(key, value) -} - -// Implements atomicSetDeleter. -func (ddb debugDB) SetNoLockSync(key []byte, value []byte) { - fmt.Printf("%v.SetNoLockSync(%v, %v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - ddb.db.(atomicSetDeleter).SetNoLockSync(key, value) -} - -// Implements DB. -func (ddb debugDB) Delete(key []byte) { - fmt.Printf("%v.Delete(%v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) - ddb.db.Delete(key) -} - -// Implements DB. -func (ddb debugDB) DeleteSync(key []byte) { - fmt.Printf("%v.DeleteSync(%v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) - ddb.db.DeleteSync(key) -} - -// Implements atomicSetDeleter. -func (ddb debugDB) DeleteNoLock(key []byte) { - fmt.Printf("%v.DeleteNoLock(%v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) - ddb.db.(atomicSetDeleter).DeleteNoLock(key) -} - -// Implements atomicSetDeleter. -func (ddb debugDB) DeleteNoLockSync(key []byte) { - fmt.Printf("%v.DeleteNoLockSync(%v)\n", ddb.label, - cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) - ddb.db.(atomicSetDeleter).DeleteNoLockSync(key) -} - -// Implements DB. -func (ddb debugDB) Iterator(start, end []byte) Iterator { - fmt.Printf("%v.Iterator(%v, %v)\n", ddb.label, - cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue), - cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue)) - return NewDebugIterator(ddb.label, ddb.db.Iterator(start, end)) -} - -// Implements DB. -func (ddb debugDB) ReverseIterator(start, end []byte) Iterator { - fmt.Printf("%v.ReverseIterator(%v, %v)\n", ddb.label, - cmn.ColoredBytes(start, cmn.Cyan, cmn.Blue), - cmn.ColoredBytes(end, cmn.Cyan, cmn.Blue)) - return NewDebugIterator(ddb.label, ddb.db.ReverseIterator(start, end)) -} - -// Implements DB. -// Panics if the underlying db is not an -// atomicSetDeleter. -func (ddb debugDB) NewBatch() Batch { - fmt.Printf("%v.NewBatch()\n", ddb.label) - return NewDebugBatch(ddb.label, ddb.db.NewBatch()) -} - -// Implements DB. -func (ddb debugDB) Close() { - fmt.Printf("%v.Close()\n", ddb.label) - ddb.db.Close() -} - -// Implements DB. -func (ddb debugDB) Print() { - ddb.db.Print() -} - -// Implements DB. 
-func (ddb debugDB) Stats() map[string]string { - return ddb.db.Stats() -} - -//---------------------------------------- -// debugIterator - -type debugIterator struct { - label string - itr Iterator -} - -// For printing all operationgs to the console for debugging. -func NewDebugIterator(label string, itr Iterator) debugIterator { - return debugIterator{ - label: label, - itr: itr, - } -} - -// Implements Iterator. -func (ditr debugIterator) Domain() (start []byte, end []byte) { - defer func() { - fmt.Printf("%v.itr.Domain() (%X,%X)\n", ditr.label, start, end) - }() - start, end = ditr.itr.Domain() - return -} - -// Implements Iterator. -func (ditr debugIterator) Valid() (ok bool) { - defer func() { - fmt.Printf("%v.itr.Valid() %v\n", ditr.label, ok) - }() - ok = ditr.itr.Valid() - return -} - -// Implements Iterator. -func (ditr debugIterator) Next() { - fmt.Printf("%v.itr.Next()\n", ditr.label) - ditr.itr.Next() -} - -// Implements Iterator. -func (ditr debugIterator) Key() (key []byte) { - key = ditr.itr.Key() - fmt.Printf("%v.itr.Key() %v\n", ditr.label, - cmn.ColoredBytes(key, cmn.Cyan, cmn.Blue)) - return -} - -// Implements Iterator. -func (ditr debugIterator) Value() (value []byte) { - value = ditr.itr.Value() - fmt.Printf("%v.itr.Value() %v\n", ditr.label, - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - return -} - -// Implements Iterator. -func (ditr debugIterator) Close() { - fmt.Printf("%v.itr.Close()\n", ditr.label) - ditr.itr.Close() -} - -//---------------------------------------- -// debugBatch - -type debugBatch struct { - label string - bch Batch -} - -// For printing all operationgs to the console for debugging. -func NewDebugBatch(label string, bch Batch) debugBatch { - return debugBatch{ - label: label, - bch: bch, - } -} - -// Implements Batch. -func (dbch debugBatch) Set(key, value []byte) { - fmt.Printf("%v.batch.Set(%v, %v)\n", dbch.label, - cmn.ColoredBytes(key, cmn.Yellow, cmn.Blue), - cmn.ColoredBytes(value, cmn.Green, cmn.Blue)) - dbch.bch.Set(key, value) -} - -// Implements Batch. -func (dbch debugBatch) Delete(key []byte) { - fmt.Printf("%v.batch.Delete(%v)\n", dbch.label, - cmn.ColoredBytes(key, cmn.Red, cmn.Yellow)) - dbch.bch.Delete(key) -} - -// Implements Batch. -func (dbch debugBatch) Write() { - fmt.Printf("%v.batch.Write()\n", dbch.label) - dbch.bch.Write() -} - -// Implements Batch. -func (dbch debugBatch) WriteSync() { - fmt.Printf("%v.batch.WriteSync()\n", dbch.label) - dbch.bch.WriteSync() -} - -// Implements Batch. 
-func (dbch debugBatch) Close() { - dbch.bch.Close() -} diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index a52559b86..2c1a716b3 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -9,8 +9,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" optPkg "github.com/syndtr/goleveldb/leveldb/opt" - - cmn "github.com/tendermint/tendermint/libs/common" ) func init() { @@ -71,7 +69,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { value = nonNilBytes(value) err := db.db.Put(key, value, nil) if err != nil { - cmn.PanicCrisis(err) + panic(err) } } @@ -81,7 +79,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { value = nonNilBytes(value) err := db.db.Put(key, value, &optPkg.WriteOptions{Sync: true}) if err != nil { - cmn.PanicCrisis(err) + panic(err) } } @@ -90,7 +88,7 @@ func (db *GoLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, nil) if err != nil { - cmn.PanicCrisis(err) + panic(err) } } @@ -99,7 +97,7 @@ func (db *GoLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, &optPkg.WriteOptions{Sync: true}) if err != nil { - cmn.PanicCrisis(err) + panic(err) } } diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go index c24eec3c8..f781a2b3d 100644 --- a/libs/db/go_level_db_test.go +++ b/libs/db/go_level_db_test.go @@ -1,29 +1,27 @@ package db import ( - "bytes" - "encoding/binary" "fmt" - "os" "testing" + "github.com/stretchr/testify/require" "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" ) -func TestNewGoLevelDB(t *testing.T) { +func TestGoLevelDBNewGoLevelDB(t *testing.T) { name := fmt.Sprintf("test_%x", cmn.RandStr(12)) - // Test write locks - db, err := NewGoLevelDB(name, "") + defer cleanupDBDir("", name) + + // Test we can't open the db twice for writing + wr1, err := NewGoLevelDB(name, "") require.Nil(t, err) - defer os.RemoveAll("./" + name + ".db") _, err = NewGoLevelDB(name, "") require.NotNil(t, err) - db.Close() // Close the db to release the lock + wr1.Close() // Close the db to release the lock - // Open the db twice in a row to test read-only locks + // Test we can open the db twice for reading only ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) defer ro1.Close() require.Nil(t, err) @@ -32,75 +30,16 @@ func TestNewGoLevelDB(t *testing.T) { require.Nil(t, err) } -func BenchmarkRandomReadsWrites(b *testing.B) { - b.StopTimer() - - numItems := int64(1000000) - internal := map[int64]int64{} - for i := 0; i < int(numItems); i++ { - internal[int64(i)] = int64(0) - } - db, err := NewGoLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") +func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) { + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + db, err := NewGoLevelDB(name, "") if err != nil { - b.Fatal(err.Error()) - return + b.Fatal(err) } + defer func() { + db.Close() + cleanupDBDir("", name) + }() - fmt.Println("ok, starting") - b.StartTimer() - - for i := 0; i < b.N; i++ { - // Write something - { - idx := (int64(cmn.RandInt()) % numItems) - internal[idx]++ - val := internal[idx] - idxBytes := int642Bytes(int64(idx)) - valBytes := int642Bytes(int64(val)) - //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) - db.Set( - idxBytes, - valBytes, - ) - } - // Read something - { - idx := (int64(cmn.RandInt()) % numItems) - val := internal[idx] - idxBytes := int642Bytes(int64(idx)) - valBytes := db.Get(idxBytes) - 
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) - if val == 0 { - if !bytes.Equal(valBytes, nil) { - b.Errorf("Expected %v for %v, got %X", - nil, idx, valBytes) - break - } - } else { - if len(valBytes) != 8 { - b.Errorf("Expected length 8 for %v, got %X", - idx, valBytes) - break - } - valGot := bytes2Int64(valBytes) - if val != valGot { - b.Errorf("Expected %v for %v, got %v", - val, idx, valGot) - break - } - } - } - } - - db.Close() -} - -func int642Bytes(i int64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - return buf -} - -func bytes2Int64(buf []byte) int64 { - return int64(binary.BigEndian.Uint64(buf)) + benchmarkRandomReadsWrites(b, db) } diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go index 0dd06ef9d..70a669387 100644 --- a/libs/db/prefix_db.go +++ b/libs/db/prefix_db.go @@ -212,7 +212,10 @@ func (pdb *prefixDB) Stats() map[string]string { } func (pdb *prefixDB) prefixed(key []byte) []byte { - return append(cp(pdb.prefix), key...) + ret := make([]byte, len(pdb.prefix), len(pdb.prefix)+len(key)) + copy(ret, pdb.prefix) + ret = append(ret, key...) + return ret } //---------------------------------------- @@ -231,13 +234,11 @@ func newPrefixBatch(prefix []byte, source Batch) prefixBatch { } func (pb prefixBatch) Set(key, value []byte) { - pkey := append(cp(pb.prefix), key...) - pb.source.Set(pkey, value) + pb.source.Set(pb.prefixed(key), value) } func (pb prefixBatch) Delete(key []byte) { - pkey := append(cp(pb.prefix), key...) - pb.source.Delete(pkey) + pb.source.Delete(pb.prefixed(key)) } func (pb prefixBatch) Write() { @@ -252,6 +253,13 @@ func (pb prefixBatch) Close() { pb.source.Close() } +func (pb prefixBatch) prefixed(key []byte) []byte { + ret := make([]byte, len(pb.prefix), len(pb.prefix) + len(key)) + copy(ret, pb.prefix) + ret = append(ret, key...) 
+ return ret +} + //---------------------------------------- // prefixIterator diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go index f5c8e2cb5..43a022461 100644 --- a/libs/db/remotedb/remotedb_test.go +++ b/libs/db/remotedb/remotedb_test.go @@ -28,7 +28,7 @@ func TestRemoteDB(t *testing.T) { client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) require.Nil(t, err, "expecting a successful client creation") dbName := "test-remote-db" - require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"})) + require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"})) defer func() { err := os.RemoveAll(dbName + ".db") if err != nil { diff --git a/libs/db/remotedb/test.crt b/libs/db/remotedb/test.crt index 06ffec1d2..1090e73d7 100644 --- a/libs/db/remotedb/test.crt +++ b/libs/db/remotedb/test.crt @@ -1,19 +1,25 @@ -----BEGIN CERTIFICATE----- -MIIDAjCCAeqgAwIBAgIJAOGCVedOwRbOMA0GCSqGSIb3DQEBBQUAMCExCzAJBgNV -BAYTAlVTMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwMjExMTU0NjQ5WhcNMjAw -MjExMTU0NjQ5WjAhMQswCQYDVQQGEwJVUzESMBAGA1UEAwwJbG9jYWxob3N0MIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA60S/fNUWoHm1PYI/yrlnZNtr -dRqDORHe0hPwl/lttLz7+a7HzQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWc -GjxJL24tVwiOwqYRzTPZ/rK3JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4D -h/XgWjEt4DhpHwf/zuIK9XkJw0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0 -AdsQCjt1GKcIROkcOGUHqByINJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhq -HRTCt5UELWs/53Gj1ffNuhjECOVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQID -AQABoz0wOzAMBgNVHRMEBTADAQH/MCsGA1UdEQQkMCKCCWxvY2FsaG9zdIIJbG9j -YWxob3N0hwQAAAAAhwR/AAABMA0GCSqGSIb3DQEBBQUAA4IBAQCe2A5gDc3jiZwT -a5TJrc2J2KouqxB/PCddw5VY8jPsZJfsr9gxHi+Xa5g8p3oqmEOIlqM5BVhrZRUG -RWHDmL+bCsuzMoA/vGHtHmUIwLeZQLWgT3kv12Dc8M9flNNjmXWxdMR9lOMwcL83 -F0CdElxSmaEbNvCIJBDetJJ7vMCqS2lnTLWurbH4ZGeGwvjzNgpgGCKwbyK/gU+j -UXiTQbVvPQ3WWACDnfH6rg0TpxU9jOBkd+4/9tUrBG7UclQBfGULk3sObLO9kx4N -8RxJmtp8jljIXVPX3udExI05pz039pAgvaeZWtP17QSbYcKF1jFtKo6ckrv2GKXX -M5OXGXdw +MIIEOjCCAiKgAwIBAgIQYO+jRR0Sbs+WzU/hj2aoxzANBgkqhkiG9w0BAQsFADAZ +MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy +MDIxMTAyMDRaMBMxETAPBgNVBAMTCHJlbW90ZWRiMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydp +qYYHYei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7 +iZjzAJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+ +hCbuwAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7x +tW3/Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHd +A5I4+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABo4GDMIGAMA4GA1Ud +DwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O +BBYEFOA8wzCYhoZmy0WHgnv/0efijUMKMB8GA1UdIwQYMBaAFNSTPe743aIx7rIp +vn5HV3gJ4z1hMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAKZf +EVo0i9nMZv6ZJjbmAlMfo5FH41/oBYC8pyGAnJKl42raXKJAbl45h80iGn3vNggf +7HJjN+znAHDFYjIwK2IV2WhHPyxK6uk+FA5uBR/aAPcw+zhRfXUMYdhNHr6KBlZZ +bvD7Iq4UALg+XFQz/fQkIi7QvTBwkYyPNA2+a/TGf6myMp26hoz73DQXklqm6Zle +myPs1Vp9bTgOv/3l64BMUV37FZ2TyiisBkV1qPEoDxT7Fbi8G1K8gMDLd0wu0jvX +nz96nk30TDnZewV1fhkMJVKKGiLbaIgHcu1lWsWJZ0tdc+MF7R9bLBO5T0cTDgNy +V8/51g+Cxu5SSHKjFkT0vBBONhjPmRqzJpxOQfHjiv8mmHwwiaNNy2VkJHj5GHer +64r67fQTSqAifzgwAbXYK+ObUbx4PnHvSYSF5dbcR1Oj6UTVtGAgdmN2Y03AIc1B +CiaojcMVuMRz/SvmPWl34GBvvT5/h9VCpHEB3vV6bQxJb5U1fLyo4GABA2Ic3DHr +kV5p7CZI06UNbyQyFtnEb5XoXywRa4Df7FzDIv3HL13MtyXrYrJqC1eAbn+3jGdh +bQa510mWYAlQQmzHSf/SLKott4QKR3SmhOGqGKNvquAYJ9XLdYdsPmKKGH6iGUD8 +n7yEi0KMD/BHsPQNNLatsR2SxqGDeLhbLR0w2hig -----END CERTIFICATE----- diff --git a/libs/db/remotedb/test.key 
b/libs/db/remotedb/test.key index e1adb3e1e..b30bf809a 100644 --- a/libs/db/remotedb/test.key +++ b/libs/db/remotedb/test.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA60S/fNUWoHm1PYI/yrlnZNtrdRqDORHe0hPwl/lttLz7+a7H -zQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWcGjxJL24tVwiOwqYRzTPZ/rK3 -JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4Dh/XgWjEt4DhpHwf/zuIK9XkJ -w0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0AdsQCjt1GKcIROkcOGUHqByI -NJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhqHRTCt5UELWs/53Gj1ffNuhjE -COVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQIDAQABAoIBAAb5n8+8pZIWaags -L2X8PzN/Sd1L7u4HOJrz2mM3EuiT3ciWRPgwImpETeJ5UW27Qc+0dTahX5DcuYxE -UErefSZ2ru0cMnNEifWVnF3q/IYf7mudss5bJ9NZYi+Dqdu7mTAXp4xFlHtaALbp -iFK/8wjoBbTHNmKWKK0IHx27Z/sjK+7QnoKij+rRzvhmNyN2r3dT7EO4VePriesr -zyVaGexNPFhtd1HLJLQ5GqRAidtLM4x1ubvp3NLTCvvoQKKYFOg7WqKycZ2VllOg -ApcpZb/kB/sNTacLvum5HgMNWuWwgREISuQJR+esz/5WaSTQ04L2+vMVomGM18X+ -9n4KYwECgYEA/Usajzl3tWv1IIairSk9Md7Z2sbaPVBNKv4IDJy3mLwt+2VN2mqo -fpeV5rBaFNWzJR0M0JwLbdlsvSfXgVFkUePg1UiJyFqOKmMO8Bd/nxV9NAewVg1D -KXQLsfrojBfka7HtFmfk/GA2swEMCGzUcY23bwah1JUTLhvbl19GNMECgYEA7chW -Ip/IvYBiaaD/qgklwJE8QoAVzi9zqlI1MOJJNf1r/BTeZ2R8oXlRk8PVxFglliuA -vMgwCkfuqxA8irIdHReLzqcLddPtaHo6R8zKP2cpYBo61C3CPzEAucasaOXQFpjs -DPnp4QFeboNPgiEGLVGHFvD5TwZpideBpWTwud0CgYEAy04MDGfJEQKNJ0VJr4mJ -R80iubqgk1QwDFEILu9fYiWxFrbSTX0Mr0eGlzp3o39/okt17L9DYTGCWTVwgajN -x/kLjsYBaaJdt+H4rHeABTWfYDLHs9pDTTOK65mELGZE/rg6n6BWqMelP/qYKO8J -efeRA3mkTVg2o+zSTea4GEECgYEA3DB4EvgD2/fXKhl8puhxnTDgrHQPvS8T3NTj -jLD/Oo/CP1zT1sqm3qCJelwOyBMYO0dtn2OBmQOjb6VJauYlL5tuS59EbYgigG0v -Ku3pG21cUzH26CS3i+zEz0O6xCiL2WEitaF3gnTSDWRrbAVIww6MGiJru1IkyRBX -beFbScECf1n00W9qrXnqsWefk73ucggfV0gQQmDnauMA9J7B96+MvGprE54Tx9vl -SBodgvJsCod9Y9Q7QsMcXb4CuEgTgWKDBp5cA/KUOQmK5buOrysosLnnm12LaHiF -O7IIh8Cmb9TbdldgW+8ndZ4EQ3lfIS0zN3/7rWD34bs19JDYkRY= +MIIEpQIBAAKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydpqYYH +Yei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7iZjz +AJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+hCbu +wAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7xtW3/ +Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHdA5I4 ++RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABAoIBAQCEVFAZ3puc7aIU +NuIXqwmMz+KMFuMr+SL6aYr6LhB2bhpfQSr6LLEu1L6wMm1LnCbLneJVtW+1/6U+ +SyNFRmzrmmLNmZx7c0AvZb14DQ4fJ8uOjryje0vptUHT1YJJ4n5R1L7yJjCElsC8 +cDBPfO+sOzlaGmBmuxU7NkNp0k/WJc1Wnn5WFCKKk8BCH1AUKvn/vwbRV4zl/Be7 +ApywPUouV+GJlTAG5KLb15CWKSqFNJxUJ6K7NnmfDoy7muUUv8MtrTn59XTH4qK7 +p/3A8tdNpR/RpEJ8+y3kS9CDZBVnsk0j0ptT//jdt1vSsylXxrf7vjLnyguRZZ5H +Vwe2POotAoGBAOY1UaFjtIz2G5qromaUtrPb5EPWRU8fiLtUXUDKG8KqNAqsGbDz +Stw1mVFyyuaFMReO18djCvcja1xxF3TZbdpV1k7RfcpEZXiFzBAPgeEGdA3Tc3V2 +byuJQthWamCBxF/7OGUmH/E/kH0pv5g9+eIitK/CUC2YUhCnubhchGAXAoGBAMxL +O7mnPqDJ2PqxVip/lL6VnchtF1bx1aDNr83rVTf+BEsOgCIFoDEBIVKDnhXlaJu7 +8JN4la/esytq4j3nM1cl6mjvw2ixYmwQtKiDuNiyb88hhQ+nxVsbIpYxtbhsj+u5 +hOrMN6jKd0GVWsYpdNvY/dXZG1MXhbWwExjRAY+vAoGBAKBu3jHUU5q9VWWIYciN +sXpNL5qbNHg86MRsugSSFaCnj1c0sz7ffvdSn0Pk9USL5Defw/9fpd+wHn0xD4DO +msFDevQ5CSoyWmkRDbLPq9sP7UdJariczkMQCLbOGpqhNSMS6C2N0UsG2oJv2ueV +oZUYTMYEbG4qLl8PFN5IE7UHAoGAGwEq4OyZm7lyxBii8jUxHUw7sh2xgx2uhnYJ +8idUeXVLbfx5tYWW2kNy+yxIvk432LYsI+JBryC6AFg9lb81CyUI6lwfMXyZLP28 +U7Ytvf9ARloA88PSk6tvk/j4M2uuTpOUXVEnXll9EB9FA4LBXro9O4JaWU53rz+a +FqKyGSMCgYEAuYCGC+Fz7lIa0aE4tT9mwczQequxGYsL41KR/4pDO3t9QsnzunLY +fvCFhteBOstwTBBdfBaKIwSp3zI2QtA4K0Jx9SAJ9q0ft2ciB9ukUFBhC9+TqzXg +gSz3XpRtI8PhwAxZgCnov+NPQV8IxvD4ZgnnEiRBHrYnSEsaMLoVnkw= -----END RSA PRIVATE KEY----- diff --git a/libs/db/util_test.go b/libs/db/util_test.go index 
07f9dd23e..39a02160c 100644 --- a/libs/db/util_test.go +++ b/libs/db/util_test.go @@ -22,6 +22,11 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { // Empty iterator for db populated after iterator created. func TestPrefixIteratorNoMatch1(t *testing.T) { for backend := range backends { + if backend == BoltDBBackend { + t.Log("bolt does not support concurrent writes while iterating") + continue + } + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db, dir := newTempDB(t, backend) defer os.RemoveAll(dir) diff --git a/libs/errors/errors.go b/libs/errors/errors.go deleted file mode 100644 index a03382780..000000000 --- a/libs/errors/errors.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package errors contains errors that are thrown across packages. -package errors - -// // ErrPermissionsChanged occurs if the file permission have changed since the file was created. -// type ErrPermissionsChanged struct { -// name string -// got, want os.FileMode -// } - -// func NewErrPermissionsChanged(name string, got, want os.FileMode) *ErrPermissionsChanged { -// return &ErrPermissionsChanged{name: name, got: got, want: want} -// } - -// func (e ErrPermissionsChanged) Error() string { -// return fmt.Sprintf( -// "file: [%v]\nexpected file permissions: %v, got: %v", -// e.name, -// e.want, -// e.got, -// ) -// } diff --git a/libs/fail/fail.go b/libs/fail/fail.go index d7912af5c..0c9220622 100644 --- a/libs/fail/fail.go +++ b/libs/fail/fail.go @@ -2,36 +2,30 @@ package fail import ( "fmt" - "math/rand" "os" "strconv" ) -var callIndexToFail int - -func init() { +func envSet() int { callIndexToFailS := os.Getenv("FAIL_TEST_INDEX") if callIndexToFailS == "" { - callIndexToFail = -1 + return -1 } else { var err error - callIndexToFail, err = strconv.Atoi(callIndexToFailS) + callIndexToFail, err := strconv.Atoi(callIndexToFailS) if err != nil { - callIndexToFail = -1 + return -1 } + return callIndexToFail } } // Fail when FAIL_TEST_INDEX == callIndex -var ( - callIndex int //indexes Fail calls - - callRandIndex int // indexes a run of FailRand calls - callRandIndexToFail = -1 // the callRandIndex to fail on in FailRand -) +var callIndex int //indexes Fail calls func Fail() { + callIndexToFail := envSet() if callIndexToFail < 0 { return } @@ -43,33 +37,6 @@ func Fail() { callIndex += 1 } -// FailRand should be called n successive times. 
-// It will fail on a random one of those calls
-// n must be greater than 0
-func FailRand(n int) {
-	if callIndexToFail < 0 {
-		return
-	}
-
-	if callRandIndexToFail < 0 {
-		// first call in the loop, pick a random index to fail at
-		callRandIndexToFail = rand.Intn(n)
-		callRandIndex = 0
-	}
-
-	if callIndex == callIndexToFail {
-		if callRandIndex == callRandIndexToFail {
-			Exit()
-		}
-	}
-
-	callRandIndex += 1
-
-	if callRandIndex == n {
-		callIndex += 1
-	}
-}
-
 func Exit() {
 	fmt.Printf("*** fail-test %d ***\n", callIndex)
 	os.Exit(1)
diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go
index d841263ea..d57f9558e 100644
--- a/libs/log/tmfmt_logger.go
+++ b/libs/log/tmfmt_logger.go
@@ -60,9 +60,10 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error {
 
 	for i := 0; i < len(keyvals)-1; i += 2 {
 		// Extract level
-		if keyvals[i] == kitlevel.Key() {
+		switch keyvals[i] {
+		case kitlevel.Key():
 			excludeIndexes = append(excludeIndexes, i)
-			switch keyvals[i+1].(type) {
+			switch keyvals[i+1].(type) { // nolint:gocritic
 			case string:
 				lvl = keyvals[i+1].(string)
 			case kitlevel.Value:
@@ -71,11 +72,11 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error {
 				panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1]))
 			}
 			// and message
-		} else if keyvals[i] == msgKey {
+		case msgKey:
 			excludeIndexes = append(excludeIndexes, i)
 			msg = keyvals[i+1].(string)
 			// and module (could be multiple keyvals; if such case last keyvalue wins)
-		} else if keyvals[i] == moduleKey {
+		case moduleKey:
 			excludeIndexes = append(excludeIndexes, i)
 			module = keyvals[i+1].(string)
 		}
diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go
index a43696266..34bb2a88f 100644
--- a/libs/pubsub/example_test.go
+++ b/libs/pubsub/example_test.go
@@ -21,7 +21,7 @@ func TestExample(t *testing.T) {
 	ctx := context.Background()
 	subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"))
 	require.NoError(t, err)
-	err = s.PublishWithTags(ctx, "Tombstone", map[string]string{"abci.account.name": "John"})
+	err = s.PublishWithEvents(ctx, "Tombstone", map[string][]string{"abci.account.name": {"John"}})
 	require.NoError(t, err)
 	assertReceive(t, "Tombstone", subscription.Out())
 }
diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go
index f78dac1ba..cb7b8d5bb 100644
--- a/libs/pubsub/pubsub.go
+++ b/libs/pubsub/pubsub.go
@@ -26,7 +26,7 @@
 //     for {
 //         select {
 //         case msg <- subscription.Out():
-//             // handle msg.Data() and msg.Tags()
+//             // handle msg.Data() and msg.Events()
 //         case <-subscription.Cancelled():
 //             return subscription.Err()
 //         }
@@ -61,9 +61,14 @@ var (
 	ErrAlreadySubscribed = errors.New("already subscribed")
 )
 
-// Query defines an interface for a query to be used for subscribing.
+// Query defines an interface for a query to be used for subscribing. A query
+// matches against a map of events. Each key in this map is a composite of the
+// event type and an attribute key (e.g. "{eventType}.{eventAttrKey}") and the
+// values are the event values that are contained under that relationship. This
+// allows event types to repeat themselves with the same set of keys and
+// different values.
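+//
+// For example, the events map for a (hypothetical) transfer event with two
+// attributes might look like:
+//
+//	map[string][]string{
+//		"transfer.sender": {"foo", "bar"},
+//		"transfer.amount": {"10", "20"},
+//	}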
type Query interface { - Matches(tags map[string]string) bool + Matches(events map[string][]string) bool String() string } @@ -76,12 +81,12 @@ type cmd struct { clientID string // publish - msg interface{} - tags map[string]string + msg interface{} + events map[string][]string } // Server allows clients to subscribe/unsubscribe for messages, publishing -// messages with or without tags, and manages internal state. +// messages with or without events, and manages internal state. type Server struct { cmn.BaseService @@ -258,15 +263,15 @@ func (s *Server) NumClientSubscriptions(clientID string) int { // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithTags(ctx, msg, make(map[string]string)) + return s.PublishWithEvents(ctx, msg, make(map[string][]string)) } -// PublishWithTags publishes the given message with the set of tags. The set is -// matched with clients queries. If there is a match, the message is sent to +// PublishWithEvents publishes the given message with the set of events. The set +// is matched with clients queries. If there is a match, the message is sent to // the client. -func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags map[string]string) error { +func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events map[string][]string) error { select { - case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: + case s.cmds <- cmd{op: pub, msg: msg, events: events}: return nil case <-ctx.Done(): return ctx.Err() @@ -325,7 +330,7 @@ loop: case sub: state.add(cmd.clientID, cmd.query, cmd.subscription) case pub: - state.send(cmd.msg, cmd.tags) + state.send(cmd.msg, cmd.events) } } } @@ -392,18 +397,18 @@ func (state *state) removeAll(reason error) { } } -func (state *state) send(msg interface{}, tags map[string]string) { +func (state *state) send(msg interface{}, events map[string][]string) { for qStr, clientSubscriptions := range state.subscriptions { q := state.queries[qStr].q - if q.Matches(tags) { + if q.Matches(events) { for clientID, subscription := range clientSubscriptions { if cap(subscription.out) == 0 { // block on unbuffered channel - subscription.out <- Message{msg, tags} + subscription.out <- NewMessage(msg, events) } else { // don't block on buffered channels select { - case subscription.out <- Message{msg, tags}: + case subscription.out <- NewMessage(msg, events): default: state.remove(clientID, qStr, ErrOutOfCapacity) } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 884477563..5a2baa14f 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -46,13 +46,16 @@ func TestSubscribe(t *testing.T) { err = s.Publish(ctx, "Asylum") assert.NoError(t, err) + + err = s.Publish(ctx, "Ivan") + assert.NoError(t, err) }() select { case <-published: assertReceive(t, "Quicksilver", subscription.Out()) assertCancelled(t, subscription, pubsub.ErrOutOfCapacity) - case <-time.After(100 * time.Millisecond): + case <-time.After(3 * time.Second): t.Fatal("Expected Publish(Asylum) not to block") } } @@ -101,7 +104,7 @@ func TestSubscribeUnbuffered(t *testing.T) { select { case <-published: t.Fatal("Expected Publish(Darkhawk) to block") - case <-time.After(100 * time.Millisecond): + case <-time.After(3 * time.Second): assertReceive(t, "Ultron", subscription.Out()) assertReceive(t, "Darkhawk", subscription.Out()) } @@ -133,24 +136,75 @@ func 
TestDifferentClients(t *testing.T) { ctx := context.Background() subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Iceman", map[string]string{"tm.events.type": "NewBlock"}) + err = s.PublishWithEvents(ctx, "Iceman", map[string][]string{"tm.events.type": {"NewBlock"}}) require.NoError(t, err) assertReceive(t, "Iceman", subscription1.Out()) subscription2, err := s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'")) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Ultimo", map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) + err = s.PublishWithEvents(ctx, "Ultimo", map[string][]string{"tm.events.type": {"NewBlock"}, "abci.account.name": {"Igor"}}) require.NoError(t, err) assertReceive(t, "Ultimo", subscription1.Out()) assertReceive(t, "Ultimo", subscription2.Out()) subscription3, err := s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10")) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Valeria Richards", map[string]string{"tm.events.type": "NewRoundStep"}) + err = s.PublishWithEvents(ctx, "Valeria Richards", map[string][]string{"tm.events.type": {"NewRoundStep"}}) require.NoError(t, err) assert.Zero(t, len(subscription3.Out())) } +func TestSubscribeDuplicateKeys(t *testing.T) { + ctx := context.Background() + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + require.NoError(t, s.Start()) + defer s.Stop() + + testCases := []struct { + query string + expected interface{} + }{ + { + "withdraw.rewards='17'", + "Iceman", + }, + { + "withdraw.rewards='22'", + "Iceman", + }, + { + "withdraw.rewards='1' AND withdraw.rewards='22'", + "Iceman", + }, + { + "withdraw.rewards='100'", + nil, + }, + } + + for i, tc := range testCases { + sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustParse(tc.query)) + require.NoError(t, err) + + err = s.PublishWithEvents( + ctx, + "Iceman", + map[string][]string{ + "transfer.sender": {"foo", "bar", "baz"}, + "withdraw.rewards": {"1", "17", "22"}, + }, + ) + require.NoError(t, err) + + if tc.expected != nil { + assertReceive(t, tc.expected, sub.Out()) + } else { + require.Zero(t, len(sub.Out())) + } + } +} + func TestClientSubscribesTwice(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) @@ -162,7 +216,7 @@ func TestClientSubscribesTwice(t *testing.T) { subscription1, err := s.Subscribe(ctx, clientID, q) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Goblin Queen", map[string]string{"tm.events.type": "NewBlock"}) + err = s.PublishWithEvents(ctx, "Goblin Queen", map[string][]string{"tm.events.type": {"NewBlock"}}) require.NoError(t, err) assertReceive(t, "Goblin Queen", subscription1.Out()) @@ -170,7 +224,7 @@ func TestClientSubscribesTwice(t *testing.T) { require.Error(t, err) require.Nil(t, subscription2) - err = s.PublishWithTags(ctx, "Spider-Man", map[string]string{"tm.events.type": "NewBlock"}) + err = s.PublishWithEvents(ctx, "Spider-Man", map[string][]string{"tm.events.type": {"NewBlock"}}) require.NoError(t, err) assertReceive(t, "Spider-Man", subscription1.Out()) } @@ -219,11 +273,11 @@ func TestResubscribe(t *testing.T) { defer s.Stop() ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) + _, err := s.Subscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) err = s.Unsubscribe(ctx, 
clientID, query.Empty{}) require.NoError(t, err) - subscription, err = s.Subscribe(ctx, clientID, query.Empty{}) + subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) err = s.Publish(ctx, "Cable") @@ -309,7 +363,7 @@ func benchmarkNClients(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)}) + s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(i)}}) } } @@ -340,7 +394,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"}) + s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {"1"}}) } } diff --git a/libs/pubsub/query/empty.go b/libs/pubsub/query/empty.go index 83271f047..2d7642adc 100644 --- a/libs/pubsub/query/empty.go +++ b/libs/pubsub/query/empty.go @@ -5,7 +5,7 @@ type Empty struct { } // Matches always returns true. -func (Empty) Matches(tags map[string]string) bool { +func (Empty) Matches(tags map[string][]string) bool { return true } diff --git a/libs/pubsub/query/empty_test.go b/libs/pubsub/query/empty_test.go index 141fb9515..3fcd2d728 100644 --- a/libs/pubsub/query/empty_test.go +++ b/libs/pubsub/query/empty_test.go @@ -10,8 +10,8 @@ import ( func TestEmptyQueryMatchesAnything(t *testing.T) { q := query.Empty{} - assert.True(t, q.Matches(map[string]string{})) - assert.True(t, q.Matches(map[string]string{"Asher": "Roth"})) - assert.True(t, q.Matches(map[string]string{"Route": "66"})) - assert.True(t, q.Matches(map[string]string{"Route": "66", "Billy": "Blue"})) + assert.True(t, q.Matches(map[string][]string{})) + assert.True(t, q.Matches(map[string][]string{"Asher": {"Roth"}})) + assert.True(t, q.Matches(map[string][]string{"Route": {"66"}})) + assert.True(t, q.Matches(map[string][]string{"Route": {"66"}, "Billy": {"Blue"}})) } diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 3dc526050..394de4cf2 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -167,12 +167,14 @@ func (q *Query) Conditions() []Condition { return conditions } -// Matches returns true if the query matches the given set of tags, false otherwise. +// Matches returns true if the query matches against any event in the given set +// of events, false otherwise. For each event, a match exists if the query is +// matched against *any* value in a slice of values. // -// For example, query "name=John" matches tags = {"name": "John"}. More -// examples could be found in parser_test.go and query_test.go. -func (q *Query) Matches(tags map[string]string) bool { - if len(tags) == 0 { +// For example, query "name=John" matches events = {"name": ["John", "Eric"]}. +// More examples could be found in parser_test.go and query_test.go. 
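To make the new any-value matching semantics concrete, here is a minimal standalone sketch (an editorial illustration, not part of the diff; it only uses `query.MustParse` and the `Matches` signature as changed above):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	q := query.MustParse("tm.events.type='NewBlock'")

	// Events now carry a slice of values per composite key; the query
	// matches if *any* value under the key satisfies the condition.
	events := map[string][]string{
		"tm.events.type": {"NewRoundStep", "NewBlock"},
	}

	fmt.Println(q.Matches(events)) // true: "NewBlock" is among the values
}
```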
+func (q *Query) Matches(events map[string][]string) bool { + if len(events) == 0 { return false } @@ -181,7 +183,8 @@ func (q *Query) Matches(tags map[string]string) bool { var tag string var op Operator - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + // tokens must be in the following order: + // tag ("tx.gas") -> operator ("=") -> operand ("7") for _, token := range q.parser.Tokens() { switch token.pegRule { @@ -207,7 +210,7 @@ func (q *Query) Matches(tags map[string]string) bool { // see if the triplet (tag, operator, operand) matches any tag // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } - if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { + if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), events) { return false } case rulenumber: @@ -217,7 +220,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } } else { @@ -225,7 +228,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } } @@ -234,7 +237,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } case ruledate: @@ -242,7 +245,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } } @@ -251,34 +254,53 @@ func (q *Query) Matches(tags map[string]string) bool { return true } -// match returns true if the given triplet (tag, operator, operand) matches any tag. +// match returns true if the given triplet (tag, operator, operand) matches any +// value in an event for that key. // -// First, it looks up the tag in tags and if it finds one, tries to compare the -// value from it to the operand using the operator. +// First, it looks up the key in the events and if it finds one, tries to compare +// all the values from it to the operand using the operator. 
// -// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } -func match(tag string, op Operator, operand reflect.Value, tags map[string]string) bool { +// "tx.gas", "=", "7", {"tx": [{"gas": 7, "ID": "4AE393495334"}]} +func match(tag string, op Operator, operand reflect.Value, events map[string][]string) bool { // look up the tag from the query in tags - value, ok := tags[tag] + values, ok := events[tag] if !ok { return false } + + for _, value := range values { + // return true if any value in the set of the event's values matches + if matchValue(value, op, operand) { + return true + } + } + + return false +} + +// matchValue will attempt to match a string value against an operation an +// operand. A boolean is returned representing the match result. It will panic +// if an error occurs or if the operand is invalid. +func matchValue(value string, op Operator, operand reflect.Value) bool { switch operand.Kind() { case reflect.Struct: // time operandAsTime := operand.Interface().(time.Time) + // try our best to convert value from tags to time.Time var ( v time.Time err error ) + if strings.ContainsAny(value, "T") { v, err = time.Parse(TimeLayout, value) } else { v, err = time.Parse(DateLayout, value) } if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err)) + panic(fmt.Sprintf("failed to convert value %v from tag to time.Time: %v", value, err)) } + switch op { case OpLessEqual: return v.Before(operandAsTime) || v.Equal(operandAsTime) @@ -291,14 +313,17 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin case OpEqual: return v.Equal(operandAsTime) } + case reflect.Float64: operandFloat64 := operand.Interface().(float64) var v float64 + // try our best to convert value from tags to float64 v, err := strconv.ParseFloat(value, 64) if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) + panic(fmt.Sprintf("failed to convert value %v from tag to float64: %v", value, err)) } + switch op { case OpLessEqual: return v <= operandFloat64 @@ -311,6 +336,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin case OpEqual: return v == operandFloat64 } + case reflect.Int64: operandInt := operand.Interface().(int64) var v int64 @@ -318,7 +344,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin if strings.ContainsAny(value, ".") { v1, err := strconv.ParseFloat(value, 64) if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) + panic(fmt.Sprintf("failed to convert value %v from tag to float64: %v", value, err)) } v = int64(v1) } else { @@ -326,7 +352,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin // try our best to convert value from tags to int64 v, err = strconv.ParseInt(value, 10, 64) if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err)) + panic(fmt.Sprintf("failed to convert value %v from tag to int64: %v", value, err)) } } switch op { @@ -341,6 +367,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin case OpEqual: return v == operandInt } + case reflect.String: switch op { case OpEqual: @@ -348,8 +375,9 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin case OpContains: return strings.Contains(value, operand.String()) } + default: - panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind())) + 
panic(fmt.Sprintf("unknown kind of operand %v", operand.Kind())) } return false diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index a3d83b259..10dc3d221 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -19,30 +19,40 @@ func TestMatches(t *testing.T) { testCases := []struct { s string - tags map[string]string + events map[string][]string err bool matches bool }{ - {"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true}, - - {"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true}, - {"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true}, - {"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true}, - {"account.balance < 1000.0", map[string]string{"account.balance": "900"}, false, true}, - {"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true}, - {"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true}, - {"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true}, - {"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false}, - - {"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true}, - {"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true}, - {"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false}, - - {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true}, - {"tx.time = TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": txTime}, false, false}, - - {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true}, - {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false}, + {"tm.events.type='NewBlock'", map[string][]string{"tm.events.type": {"NewBlock"}}, false, true}, + + {"tx.gas > 7", map[string][]string{"tx.gas": {"8"}}, false, true}, + {"tx.gas > 7 AND tx.gas < 9", map[string][]string{"tx.gas": {"8"}}, false, true}, + {"body.weight >= 3.5", map[string][]string{"body.weight": {"3.5"}}, false, true}, + {"account.balance < 1000.0", map[string][]string{"account.balance": {"900"}}, false, true}, + {"apples.kg <= 4", map[string][]string{"apples.kg": {"4.0"}}, false, true}, + {"body.weight >= 4.5", map[string][]string{"body.weight": {fmt.Sprintf("%v", float32(4.5))}}, false, true}, + {"oranges.kg < 4 AND watermellons.kg > 10", map[string][]string{"oranges.kg": {"3"}, "watermellons.kg": {"12"}}, false, true}, + {"peaches.kg < 4", map[string][]string{"peaches.kg": {"5"}}, false, false}, + + {"tx.date > DATE 2017-01-01", map[string][]string{"tx.date": {time.Now().Format(query.DateLayout)}}, false, true}, + {"tx.date = DATE 2017-01-01", map[string][]string{"tx.date": {txDate}}, false, true}, + {"tx.date = DATE 2018-01-01", map[string][]string{"tx.date": {txDate}}, false, false}, + + {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {time.Now().Format(query.TimeLayout)}}, false, true}, + {"tx.time = TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {txTime}}, false, false}, + + {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Igor,Ivan"}}, false, true}, + {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Pavel,Ivan"}}, false, false}, + + {"abci.owner.name = 'Igor'", 
map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true}, + {"abci.owner.name = 'Ivan'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true}, + {"abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true}, + {"abci.owner.name = 'Ivan' AND abci.owner.name = 'John'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, false}, + + {"tm.events.type='NewBlock'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true}, + {"app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true}, + {"tm.events.type='NewBlock' AND app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true}, + {"tm.events.type='NewHeader' AND app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, false}, } for _, tc := range testCases { @@ -51,10 +61,12 @@ func TestMatches(t *testing.T) { require.Nil(t, err) } + require.NotNil(t, q, "Query '%s' should not be nil", tc.s) + if tc.matches { - assert.True(t, q.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags) + assert.True(t, q.Matches(tc.events), "Query '%s' should match %v", tc.s, tc.events) } else { - assert.False(t, q.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags) + assert.False(t, q.Matches(tc.events), "Query '%s' should not match %v", tc.s, tc.events) } } } diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 2660439f5..40c97c9ee 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -70,12 +70,12 @@ func (s *Subscription) cancel(err error) { // Message glues data and tags together. type Message struct { - data interface{} - tags map[string]string + data interface{} + events map[string][]string } -func NewMessage(data interface{}, tags map[string]string) Message { - return Message{data, tags} +func NewMessage(data interface{}, events map[string][]string) Message { + return Message{data, events} } // Data returns an original data published. @@ -83,7 +83,7 @@ func (msg Message) Data() interface{} { return msg.data } -// Tags returns tags, which matched the client's query. -func (msg Message) Tags() map[string]string { - return msg.tags +// Events returns events, which matched the client's query. +func (msg Message) Events() map[string][]string { + return msg.events } diff --git a/libs/test.sh b/libs/test.sh index 64898b0d2..d0618768b 100755 --- a/libs/test.sh +++ b/libs/test.sh @@ -4,9 +4,6 @@ set -e # run the linter # make lint -# setup certs -make gen_certs - # run the unit tests with coverage echo "" > coverage.txt for d in $(go list ./... | grep -v vendor); do @@ -16,6 +13,3 @@ for d in $(go list ./... 
| grep -v vendor); do rm profile.out fi done - -# cleanup certs -make clean_certs diff --git a/libs/test/assert.go b/libs/test/assert.go deleted file mode 100644 index a6ffed0ce..000000000 --- a/libs/test/assert.go +++ /dev/null @@ -1,14 +0,0 @@ -package test - -import ( - "testing" -) - -func AssertPanics(t *testing.T, msg string, f func()) { - defer func() { - if err := recover(); err == nil { - t.Errorf("Should have panic'd, but didn't: %v", msg) - } - }() - f() -} diff --git a/libs/version/version.go b/libs/version/version.go deleted file mode 100644 index 6e73a937d..000000000 --- a/libs/version/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package version - -const Version = "0.9.0" diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index db2b6e46c..d92a486ea 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -143,13 +143,13 @@ func TestTxProofs(t *testing.T) { // First let's make sure a bogus transaction hash returns a valid non-existence proof. key := types.Tx([]byte("bogus")).Hash() - res, err := cl.Tx(key, true) + _, err = cl.Tx(key, true) require.NotNil(err) require.Contains(err.Error(), "not found") // Now let's check with the real tx root hash. key = types.Tx(tx).Hash() - res, err = cl.Tx(key, true) + res, err := cl.Tx(key, true) require.NoError(err, "%#v", err) require.NotNil(res) keyHash := merkle.SimpleHashFromByteSlices([][]byte{key}) diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index b7c11f18e..429c54b2d 100644 --- a/lite/proxy/verifier.go +++ b/lite/proxy/verifier.go @@ -14,7 +14,7 @@ func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logge logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize) - lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) + lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir)) trust := lite.NewMultiProvider( memProvider, lvlProvider, diff --git a/mempool/cache_test.go b/mempool/cache_test.go index ea9f63fd6..539bf1197 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -66,7 +67,7 @@ func TestCacheAfterUpdate(t *testing.T) { tx := types.Tx{byte(v)} updateTxs = append(updateTxs, tx) } - mempool.Update(int64(tcIndex), updateTxs, nil, nil) + mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) for _, v := range tc.reAddIndices { tx := types.Tx{byte(v)} diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go new file mode 100644 index 000000000..7cd7fd49a --- /dev/null +++ b/mempool/clist_mempool.go @@ -0,0 +1,726 @@ +package mempool + +import ( + "bytes" + "container/list" + "crypto/sha256" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + + abci "github.com/tendermint/tendermint/abci/types" + cfg "github.com/tendermint/tendermint/config" + auto "github.com/tendermint/tendermint/libs/autofile" + "github.com/tendermint/tendermint/libs/clist" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proxy" + 
"github.com/tendermint/tendermint/types" +) + +//-------------------------------------------------------------------------------- + +// CListMempool is an ordered in-memory pool for transactions before they are +// proposed in a consensus round. Transaction validity is checked using the +// CheckTx abci message before the transaction is added to the pool. The +// mempool uses a concurrent list structure for storing transactions that can +// be efficiently accessed by multiple concurrent readers. +type CListMempool struct { + config *cfg.MempoolConfig + + proxyLowMtx sync.Mutex + proxyNextMtx sync.Mutex + proxyBlockingMtx sync.Mutex + proxyAppConn proxy.AppConnMempool + txs *clist.CList // concurrent linked-list of good txs + preCheck PreCheckFunc + postCheck PostCheckFunc + + // Track whether we're rechecking txs. + // These are not protected by a mutex and are expected to be mutated + // in serial (ie. by abci responses which are called in serial). + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // notify listeners (ie. consensus) when txs are available + notifiedTxsAvailable bool + txsAvailable chan struct{} // fires once for each height, when the mempool is not empty + + // Map for quick access to txs to record sender in CheckTx. + // txsMap: txKey -> CElement + txsMap sync.Map + + // Atomic integers + height int64 // the last block Update()'d to + rechecking int32 // for re-checking filtered txs on Update() + txsBytes int64 // total size of mempool, in bytes + + // Keep a cache of already-seen txs. + // This reduces the pressure on the proxyApp. + cache txCache + + // A log of mempool txs + wal *auto.AutoFile + + logger log.Logger + + metrics *Metrics +} + +var _ Mempool = &CListMempool{} + +// CListMempoolOption sets an optional parameter on the mempool. +type CListMempoolOption func(*CListMempool) + +// NewCListMempool returns a new mempool with the given configuration and connection to an application. +func NewCListMempool( + config *cfg.MempoolConfig, + proxyAppConn proxy.AppConnMempool, + height int64, + options ...CListMempoolOption, +) *CListMempool { + mempool := &CListMempool{ + config: config, + proxyAppConn: proxyAppConn, + txs: clist.New(), + height: height, + rechecking: 0, + recheckCursor: nil, + recheckEnd: nil, + logger: log.NewNopLogger(), + metrics: NopMetrics(), + } + if config.CacheSize > 0 { + mempool.cache = newMapTxCache(config.CacheSize) + } else { + mempool.cache = nopTxCache{} + } + proxyAppConn.SetResponseCallback(mempool.globalCb) + for _, option := range options { + option(mempool) + } + return mempool +} + +// NOTE: not thread safe - should only be called once, on startup +func (mem *CListMempool) EnableTxsAvailable() { + mem.txsAvailable = make(chan struct{}, 1) +} + +// SetLogger sets the Logger. +func (mem *CListMempool) SetLogger(l log.Logger) { + mem.logger = l +} + +// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns +// false. This is ran before CheckTx. +func WithPreCheck(f PreCheckFunc) CListMempoolOption { + return func(mem *CListMempool) { mem.preCheck = f } +} + +// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns +// false. This is ran after CheckTx. +func WithPostCheck(f PostCheckFunc) CListMempoolOption { + return func(mem *CListMempool) { mem.postCheck = f } +} + +// WithMetrics sets the metrics. 
+func WithMetrics(metrics *Metrics) CListMempoolOption { + return func(mem *CListMempool) { mem.metrics = metrics } +} + +// *panics* if can't create directory or open file. +// *not thread safe* +func (mem *CListMempool) InitWAL() { + walDir := mem.config.WalDir() + err := cmn.EnsureDir(walDir, 0700) + if err != nil { + panic(errors.Wrap(err, "Error ensuring Mempool WAL dir")) + } + af, err := auto.OpenAutoFile(walDir + "/wal") + if err != nil { + panic(errors.Wrap(err, "Error opening Mempool WAL file")) + } + mem.wal = af +} + +func (mem *CListMempool) CloseWAL() { + mem.Lock() + defer mem.Unlock() + + if err := mem.wal.Close(); err != nil { + mem.logger.Error("Error closing WAL", "err", err) + } + mem.wal = nil +} + +func (mem *CListMempool) Lock() { + mem.proxyNextMtx.Lock() + mem.proxyBlockingMtx.Lock() + mem.proxyNextMtx.Unlock() +} + +func (mem *CListMempool) Unlock() { + mem.proxyBlockingMtx.Unlock() +} + +func (mem *CListMempool) LockLow() { + mem.proxyLowMtx.Lock() + mem.proxyNextMtx.Lock() + mem.proxyBlockingMtx.Lock() + mem.proxyNextMtx.Unlock() +} + +func (mem *CListMempool) UnlockLow() { + mem.proxyBlockingMtx.Unlock() + mem.proxyLowMtx.Unlock() +} + +func (mem *CListMempool) Size() int { + return mem.txs.Len() +} + +func (mem *CListMempool) TxsBytes() int64 { + return atomic.LoadInt64(&mem.txsBytes) +} + +func (mem *CListMempool) FlushAppConn() error { + return mem.proxyAppConn.FlushSync() +} + +func (mem *CListMempool) Flush() { + mem.Lock() + defer mem.Unlock() + + mem.cache.Reset() + + for e := mem.txs.Front(); e != nil; e = e.Next() { + mem.txs.Remove(e) + e.DetachPrev() + } + + mem.txsMap = sync.Map{} + _ = atomic.SwapInt64(&mem.txsBytes, 0) +} + +// TxsFront returns the first transaction in the ordered list for peer +// goroutines to call .NextWait() on. +func (mem *CListMempool) TxsFront() *clist.CElement { + return mem.txs.Front() +} + +// TxsWaitChan returns a channel to wait on transactions. It will be closed +// once the mempool is not empty (ie. the internal `mem.txs` has at least one +// element) +func (mem *CListMempool) TxsWaitChan() <-chan struct{} { + return mem.txs.WaitChan() +} + +// It blocks if we're waiting on Update() or Reap(). +// cb: A callback from the CheckTx command. +// It gets called from another goroutine. +// CONTRACT: Either cb will get called, or err returned. +func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { + return mem.CheckTxWithInfo(tx, cb, TxInfo{SenderID: UnknownPeerID, FromPersistent: false}) +} + +func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) { + mem.LockLow() + // use defer to unlock mutex because application (*local client*) might panic + defer mem.UnlockLow() + + var ( + memSize = mem.Size() + txsBytes = mem.TxsBytes() + txSize = len(tx) + ) + if memSize >= mem.config.Size || + int64(txSize)+txsBytes > mem.config.MaxTxsBytes { + return ErrMempoolIsFull{ + memSize, mem.config.Size, + txsBytes, mem.config.MaxTxsBytes} + } + + // The size of the corresponding amino-encoded TxMessage + // can't be larger than the maxMsgSize, otherwise we can't + // relay it to peers. + if txSize > mem.config.MaxTxBytes { + return ErrTxTooLarge{mem.config.MaxTxBytes, txSize} + } + + if mem.preCheck != nil { + if err := mem.preCheck(tx); err != nil { + return ErrPreCheck{err} + } + } + + // CACHE + if !mem.cache.Push(tx) { + // Record a new sender for a tx we've already seen. 
+		// Note it's possible a tx is still in the cache but no longer in the mempool
+		// (eg. after committing a block, txs are removed from mempool but not cache),
+		// so we only record the sender for txs still in the mempool.
+		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
+			memTx := e.(*clist.CElement).Value.(*mempoolTx)
+			memTx.senders.LoadOrStore(txInfo.SenderID, true)
+			// TODO: consider punishing peer for dups,
+			// it's non-trivial since invalid txs can become valid,
+			// but they can spam the same tx with little cost to them atm.
+
+		}
+
+		return ErrTxInCache
+	}
+	// END CACHE
+
+	// WAL
+	if mem.wal != nil {
+		// TODO: Notify administrators when WAL fails
+		_, err := mem.wal.Write([]byte(tx))
+		if err != nil {
+			mem.logger.Error("Error writing to WAL", "err", err)
+		}
+		_, err = mem.wal.Write([]byte("\n"))
+		if err != nil {
+			mem.logger.Error("Error writing to WAL", "err", err)
+		}
+	}
+	// END WAL
+
+	// NOTE: proxyAppConn may error if tx buffer is full
+	if err = mem.proxyAppConn.Error(); err != nil {
+		return err
+	}
+
+	reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
+	reqRes.SetCallback(mem.reqResCb(tx, txInfo, cb))
+
+	return nil
+}
+
+// Global callback that will be called after every ABCI response.
+// Having a single global callback avoids needing to set a callback for each request.
+// However, processing the checkTx response requires the peerID (so we can track which peers sent us which txs),
+// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
+// include this information. If we're not in the midst of a recheck, this function will just return,
+// so the request specific callback can do the work.
+// When rechecking, we don't need the peerID, so the recheck callback happens here.
+func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
+	if mem.recheckCursor == nil {
+		return
+	}
+
+	mem.metrics.RecheckTimes.Add(1)
+	mem.resCbRecheck(req, res)
+
+	// update metrics
+	mem.metrics.Size.Set(float64(mem.Size()))
+}
+
+// Request specific callback that should be set on individual reqRes objects
+// to incorporate local information when processing the response.
+// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
+// NOTE: alternatively, we could include this information in the ABCI request itself.
+//
+// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
+// when all other response processing is complete.
+//
+// Used in CheckTxWithInfo to record the PeerID of the peer who sent us the tx.
+func (mem *CListMempool) reqResCb(tx []byte, txInfo TxInfo, externalCb func(*abci.Response)) func(res *abci.Response) {
+	return func(res *abci.Response) {
+		if mem.recheckCursor != nil {
+			// this should never happen
+			panic("recheck cursor is not nil in reqResCb")
+		}
+
+		mem.resCbFirstTime(tx, txInfo, res)
+
+		// update metrics
+		mem.metrics.Size.Set(float64(mem.Size()))
+
+		// passed in by the caller of CheckTx, eg.
the RPC + if externalCb != nil { + externalCb(res) + } + } +} + +// Called from: +// - resCbFirstTime (lock not held) if tx is valid +func (mem *CListMempool) addTx(memTx *mempoolTx) { + e := mem.txs.PushBack(memTx) + mem.txsMap.Store(txKey(memTx.tx), e) + atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) + mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) +} + +// Called from: +// - Update (lock held) if tx was committed +// - resCbRecheck (lock not held) if tx was invalidated +func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { + mem.txs.Remove(elem) + elem.DetachPrev() + mem.txsMap.Delete(txKey(tx)) + atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) + + if removeFromCache { + mem.cache.Remove(tx) + } +} + +// callback, which is called after the app checked the tx for the first time. +// +// The case where the app checks the tx for the second and subsequent times is +// handled by the resCbRecheck callback. +func (mem *CListMempool) resCbFirstTime(tx []byte, txInfo TxInfo, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, r.CheckTx) + } + if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { + memTx := &mempoolTx{ + fromPersistent: txInfo.FromPersistent, + height: mem.height, + gasWanted: r.CheckTx.GasWanted, + tx: tx, + } + memTx.senders.Store(txInfo.SenderID, true) + mem.addTx(memTx) + mem.notifyTxsAvailable() + } else { + // ignore bad transaction + mem.logger.Info("Rejected bad transaction", "tx", txID(tx), "res", r, "err", postCheckErr) + mem.metrics.FailedTxs.Add(1) + // remove from cache (it might be good later) + mem.cache.Remove(tx) + } + default: + // ignore other messages + } +} + +// callback, which is called after the app rechecked the tx. +// +// The case where the app checks the tx for the first time is handled by the +// resCbFirstTime callback. +func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + tx := req.GetCheckTx().Tx + memTx := mem.recheckCursor.Value.(*mempoolTx) + if !bytes.Equal(tx, memTx.tx) { + panic(fmt.Sprintf( + "Unexpected tx response from proxy during recheck\nExpected %X, got %X", + memTx.tx, + tx)) + } + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, r.CheckTx) + } + if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { + // Good, nothing to do. + } else { + // Tx became invalidated due to newly committed block. + mem.logger.Info("Tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr) + // NOTE: we remove tx from the cache because it might be good later + mem.removeTx(tx, mem.recheckCursor, true) + } + if mem.recheckCursor == mem.recheckEnd { + mem.recheckCursor = nil + } else { + mem.recheckCursor = mem.recheckCursor.Next() + } + if mem.recheckCursor == nil { + // Done! 
+			atomic.StoreInt32(&mem.rechecking, 0)
+			mem.logger.Info("Done rechecking txs")
+
+			// in case the recheck removed all txs
+			if mem.Size() > 0 {
+				mem.notifyTxsAvailable()
+			}
+		}
+	default:
+		// ignore other messages
+	}
+}
+
+func (mem *CListMempool) TxsAvailable() <-chan struct{} {
+	return mem.txsAvailable
+}
+
+func (mem *CListMempool) notifyTxsAvailable() {
+	if mem.Size() == 0 {
+		panic("notified txs available but mempool is empty!")
+	}
+	if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
+		// channel cap is 1, so this will send once
+		mem.notifiedTxsAvailable = true
+		select {
+		case mem.txsAvailable <- struct{}{}:
+		default:
+		}
+	}
+}
+
+func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
+	mem.Lock()
+	defer mem.Unlock()
+
+	for atomic.LoadInt32(&mem.rechecking) > 0 {
+		// TODO: Something better?
+		time.Sleep(time.Millisecond * 10)
+	}
+
+	var totalBytes int64
+	var totalGas int64
+	// TODO: we will get a performance boost if we have a good estimate of avg
+	// size per tx, and set the initial capacity based off of that.
+	// txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize))
+	txs := make([]types.Tx, 0, mem.txs.Len())
+	for e := mem.txs.Front(); e != nil; e = e.Next() {
+		memTx := e.Value.(*mempoolTx)
+		// Check total size requirement
+		aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
+		if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
+			return txs
+		}
+		totalBytes += int64(len(memTx.tx)) + aminoOverhead
+		// Check total gas requirement.
+		// If maxGas is negative, skip this check.
+		// Since newTotalGas < maxGas, which
+		// must be non-negative, it follows that this won't overflow.
+		newTotalGas := totalGas + memTx.gasWanted
+		if maxGas > -1 && newTotalGas > maxGas {
+			return txs
+		}
+		totalGas = newTotalGas
+		txs = append(txs, memTx.tx)
+	}
+	return txs
+}
+
+func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
+	mem.Lock()
+	defer mem.Unlock()
+
+	if max < 0 {
+		max = mem.txs.Len()
+	}
+
+	for atomic.LoadInt32(&mem.rechecking) > 0 {
+		// TODO: Something better?
+		time.Sleep(time.Millisecond * 10)
+	}
+
+	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
+	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
+		memTx := e.Value.(*mempoolTx)
+		txs = append(txs, memTx.tx)
+	}
+	return txs
+}
+
+func (mem *CListMempool) Update(
+	height int64,
+	txs types.Txs,
+	deliverTxResponses []*abci.ResponseDeliverTx,
+	preCheck PreCheckFunc,
+	postCheck PostCheckFunc,
+) error {
+	// Set height
+	mem.height = height
+	mem.notifiedTxsAvailable = false
+
+	if preCheck != nil {
+		mem.preCheck = preCheck
+	}
+	if postCheck != nil {
+		mem.postCheck = postCheck
+	}
+
+	for i, tx := range txs {
+		if deliverTxResponses[i].Code == abci.CodeTypeOK {
+			// Add valid committed tx to the cache (if missing).
+			_ = mem.cache.Push(tx)
+		} else {
+			// Allow invalid transactions to be resubmitted.
+			mem.cache.Remove(tx)
+		}
+
+		// Remove committed tx from the mempool.
+		//
+		// Note an evil proposer can drop valid txs!
+		// Mempool before:
+		//   100 -> 101 -> 102
+		// Block, proposed by an evil proposer:
+		//   101 -> 102
+		// Mempool after:
+		//   100
+		// https://github.com/tendermint/tendermint/issues/3322.
+		if e, ok := mem.txsMap.Load(txKey(tx)); ok {
+			mem.removeTx(tx, e.(*clist.CElement), false)
+		}
+	}
+
+	// Either recheck non-committed txs to see if they became invalid
+	// or just notify there are some txs left.
+ if memSize := mem.Size(); memSize > 0 { + if mem.config.Recheck { + mem.logger.Info("Recheck txs", "numtxs", memSize, "height", height) + mem.recheckTxs() + // At this point, mem.txs are being rechecked. + // mem.recheckCursor re-scans mem.txs and possibly removes some txs. + // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. + } else { + mem.notifyTxsAvailable() + } + } + + // Update metrics + mem.metrics.Size.Set(float64(mem.Size())) + + return nil +} + +func (mem *CListMempool) recheckTxs() { + if mem.Size() == 0 { + panic("recheckTxs is called, but the mempool is empty") + } + + atomic.StoreInt32(&mem.rechecking, 1) + mem.recheckCursor = mem.txs.Front() + mem.recheckEnd = mem.txs.Back() + + // Push txs to proxyAppConn + // NOTE: globalCb may be called concurrently. + for e := mem.txs.Front(); e != nil; e = e.Next() { + memTx := e.Value.(*mempoolTx) + mem.proxyAppConn.ReCheckTxAsync(abci.RequestCheckTx{ + Tx: memTx.tx, + Type: abci.CheckTxType_Recheck, + }) + } + + mem.proxyAppConn.FlushAsync() +} + +//-------------------------------------------------------------------------------- + +// mempoolTx is a transaction that successfully ran +type mempoolTx struct { + height int64 // height that this tx had been validated in + gasWanted int64 // amount of gas this tx states it will require + tx types.Tx // + + // ids of peers who've sent us this tx (as a map for quick lookups). + // senders: PeerID -> bool + senders sync.Map + fromPersistent bool +} + +// Height returns the height for this transaction +func (memTx *mempoolTx) Height() int64 { + return atomic.LoadInt64(&memTx.height) +} + +//-------------------------------------------------------------------------------- + +type txCache interface { + Reset() + Push(tx types.Tx) bool + Remove(tx types.Tx) +} + +// mapTxCache maintains a LRU cache of transactions. This only stores the hash +// of the tx, due to memory concerns. +type mapTxCache struct { + mtx sync.Mutex + size int + map_ map[[sha256.Size]byte]*list.Element + list *list.List +} + +var _ txCache = (*mapTxCache)(nil) + +// newMapTxCache returns a new mapTxCache. +func newMapTxCache(cacheSize int) *mapTxCache { + return &mapTxCache{ + size: cacheSize, + map_: make(map[[sha256.Size]byte]*list.Element, cacheSize), + list: list.New(), + } +} + +// Reset resets the cache to an empty state. +func (cache *mapTxCache) Reset() { + cache.mtx.Lock() + cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size) + cache.list.Init() + cache.mtx.Unlock() +} + +// Push adds the given tx to the cache and returns true. It returns +// false if tx is already in the cache. +func (cache *mapTxCache) Push(tx types.Tx) bool { + cache.mtx.Lock() + defer cache.mtx.Unlock() + + // Use the tx hash in the cache + txHash := txKey(tx) + if moved, exists := cache.map_[txHash]; exists { + cache.list.MoveToBack(moved) + return false + } + + if cache.list.Len() >= cache.size { + popped := cache.list.Front() + poppedTxHash := popped.Value.([sha256.Size]byte) + delete(cache.map_, poppedTxHash) + if popped != nil { + cache.list.Remove(popped) + } + } + e := cache.list.PushBack(txHash) + cache.map_[txHash] = e + return true +} + +// Remove removes the given tx from the cache. 
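The mapTxCache above combines a map keyed by the sha256 tx hash with a container/list that tracks eviction order. The same pattern in isolation, as a self-contained sketch (the real cache additionally guards every operation with a mutex):

```go
package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
)

// lruHashCache mirrors the mapTxCache idea: store only fixed-size
// hashes and evict the oldest entry once the cache is full.
type lruHashCache struct {
	size int
	m    map[[sha256.Size]byte]*list.Element
	l    *list.List
}

func newLRUHashCache(size int) *lruHashCache {
	return &lruHashCache{
		size: size,
		m:    make(map[[sha256.Size]byte]*list.Element, size),
		l:    list.New(),
	}
}

// push returns false if tx is already cached; otherwise it stores the
// hash, evicting the least recently pushed entry at capacity.
func (c *lruHashCache) push(tx []byte) bool {
	h := sha256.Sum256(tx)
	if e, ok := c.m[h]; ok {
		c.l.MoveToBack(e) // refresh recency, like mapTxCache.Push
		return false
	}
	if c.l.Len() >= c.size {
		oldest := c.l.Front()
		delete(c.m, oldest.Value.([sha256.Size]byte))
		c.l.Remove(oldest)
	}
	c.m[h] = c.l.PushBack(h)
	return true
}

func main() {
	c := newLRUHashCache(2)
	fmt.Println(c.push([]byte("a"))) // true: newly cached
	fmt.Println(c.push([]byte("a"))) // false: duplicate
	c.push([]byte("b"))
	c.push([]byte("c"))              // evicts "a"
	fmt.Println(c.push([]byte("a"))) // true: "a" was evicted, cached again
}
```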
+func (cache *mapTxCache) Remove(tx types.Tx) { + cache.mtx.Lock() + txHash := txKey(tx) + popped := cache.map_[txHash] + delete(cache.map_, txHash) + if popped != nil { + cache.list.Remove(popped) + } + + cache.mtx.Unlock() +} + +type nopTxCache struct{} + +var _ txCache = (*nopTxCache)(nil) + +func (nopTxCache) Reset() {} +func (nopTxCache) Push(types.Tx) bool { return true } +func (nopTxCache) Remove(types.Tx) {} + +//-------------------------------------------------------------------------------- + +// txKey is the fixed length array sha256 hash used as the key in maps. +func txKey(tx types.Tx) [sha256.Size]byte { + return sha256.Sum256(tx) +} + +// txID is the hex encoded hash of the bytes as a types.Tx. +func txID(tx []byte) string { + return fmt.Sprintf("%X", types.Tx(tx).Hash()) +} diff --git a/mempool/mempool_test.go b/mempool/clist_mempool_test.go similarity index 85% rename from mempool/mempool_test.go rename to mempool/clist_mempool_test.go index 54c6d8c74..1cc8d2a73 100644 --- a/mempool/mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -33,18 +33,18 @@ import ( // test. type cleanupFunc func() -func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, cleanupFunc) { +func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) { return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test")) } -func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*Mempool, cleanupFunc) { +func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) { appConnMem, _ := cc.NewABCIClient() appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() if err != nil { panic(err) } - mempool := NewMempool(config.Mempool, appConnMem, 0) + mempool := NewCListMempool(config.Mempool, appConnMem, 0) mempool.SetLogger(log.TestingLogger()) return mempool, func() { os.RemoveAll(config.RootDir) } } @@ -67,9 +67,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } -func checkTxs(t *testing.T, mempool *Mempool, count int, peerID uint16) types.Txs { +func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs { txs := make(types.Txs, count) - txInfo := TxInfo{PeerID: peerID} + txInfo := TxInfo{SenderID: peerID} for i := 0; i < count; i++ { txBytes := make([]byte, 20) txs[i] = txBytes @@ -100,7 +100,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { checkTxs(t, mempool, 1, UnknownPeerID) tx0 := mempool.TxsFront().Value.(*mempoolTx) // assert that kv store has gas wanted = 1. 
- require.Equal(t, app.CheckTx(tx0.tx).GasWanted, int64(1), "KVStore had a gas value neq to 1") + require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1") require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") // ensure each tx is 20 bytes long require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") @@ -171,22 +171,45 @@ func TestMempoolFilters(t *testing.T) { {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0}, } for tcIndex, tt := range tests { - mempool.Update(1, emptyTxArr, tt.preFilter, tt.postFilter) + mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) mempool.Flush() } } -func TestMempoolUpdateAddsTxsToCache(t *testing.T) { +func TestMempoolUpdate(t *testing.T) { app := kvstore.NewKVStoreApplication() cc := proxy.NewLocalClientCreator(app) mempool, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil) - err := mempool.CheckTx([]byte{0x01}, nil) - if assert.Error(t, err) { - assert.Equal(t, ErrTxInCache, err) + + // 1. Adds valid txs to the cache + { + mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err := mempool.CheckTx([]byte{0x01}, nil) + if assert.Error(t, err) { + assert.Equal(t, ErrTxInCache, err) + } + } + + // 2. Removes valid txs from the mempool + { + err := mempool.CheckTx([]byte{0x02}, nil) + require.NoError(t, err) + mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + assert.Zero(t, mempool.Size()) + } + + // 3. Removes invalid transactions from the cache and the mempool (if present) + { + err := mempool.CheckTx([]byte{0x03}, nil) + require.NoError(t, err) + mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) + assert.Zero(t, mempool.Size()) + + err = mempool.CheckTx([]byte{0x03}, nil) + assert.NoError(t, err) } } @@ -211,7 +234,7 @@ func TestTxsAvailable(t *testing.T) { // it should fire once now for the new height // since there are still txs left committedTxs, txs := txs[:50], txs[50:] - if err := mempool.Update(1, committedTxs, nil, nil); err != nil { + if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } ensureFire(t, mempool.TxsAvailable(), timeoutMS) @@ -222,8 +245,8 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // now call update with all the txs. it should not fire as there are no txs left - committedTxs = append(txs, moreTxs...) - if err := mempool.Update(2, committedTxs, nil, nil); err != nil { + committedTxs = append(txs, moreTxs...) 
//nolint: gocritic + if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -245,8 +268,8 @@ func NewSleepCounterApplication(f bool, i int) *SleepCounterApplication { return &SleepCounterApplication{counter.NewCounterApplication(f), wg} } -func (app *SleepCounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx { - res := app.CounterApplication.CheckTx(tx) +func (app *SleepCounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { + res := app.CounterApplication.CheckTx(req) app.wg.Wait() return res } @@ -278,20 +301,20 @@ func TestReapPriority(t *testing.T) { //for threshold := range seqReap { txs := mempool.ReapMaxBytesMaxGas(-1, -1) - if len(txs) >= threshold { + if len(txs) > threshold { str := fmt.Sprintf("Reap failed to have priority, %v > %v\n", len(txs), threshold) fmt.Print(str) testResult <- str } else { - fmt.Printf("Priority reaping: %v < %v\n", len(txs), threshold) + fmt.Printf("Priority reaping: %v <= %v\n", len(txs), threshold) } j += len(txs) - if err := mempool.Update(0, txs, nil, nil); err != nil { + if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK),nil, nil); err != nil { testResult <- err.Error() } for _, txBytes := range txs { - res, err := appConnCon.DeliverTxSync(txBytes) + res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes}) if err != nil { testResult <- fmt.Sprintf("Client error committing tx: %v", err) } @@ -383,7 +406,7 @@ func TestSerialReap(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(i)) txs = append(txs, txBytes) } - if err := mempool.Update(0, txs, nil, nil); err != nil { + if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } } @@ -393,7 +416,7 @@ func TestSerialReap(t *testing.T) { for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - res, err := appConnCon.DeliverTxSync(txBytes) + res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes}) if err != nil { t.Errorf("Client error committing tx: %v", err) } @@ -450,7 +473,6 @@ func TestMempoolCloseWAL(t *testing.T) { // 1. Create the temporary directory for mempool and WAL testing. rootDir, err := ioutil.TempDir("", "mempool-test") require.Nil(t, err, "expecting successful tmpdir creation") - defer os.RemoveAll(rootDir) // 2. Ensure that it doesn't contain any elements -- Sanity check m1, err := filepath.Glob(filepath.Join(rootDir, "*")) @@ -458,13 +480,13 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 0, len(m1), "no matches yet") // 3. Create the mempool - wcfg := cfg.DefaultMempoolConfig() - wcfg.RootDir = rootDir - defer os.RemoveAll(wcfg.RootDir) + wcfg := cfg.DefaultConfig() + wcfg.Mempool.RootDir = rootDir app := kvstore.NewKVStoreApplication() cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() - mempool := NewMempool(wcfg, appConnMem, 10) + mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg) + defer cleanup() + mempool.height = 10 mempool.InitWAL() // 4. 
Ensure that the directory contains the WAL file
@@ -506,6 +528,9 @@ func TestMempoolMaxMsgSize(t *testing.T) {
 	mempl, cleanup := newMempoolWithApp(cc)
 	defer cleanup()
 
+	maxTxSize := mempl.config.MaxTxBytes
+	maxMsgSize := calcMaxMsgSize(maxTxSize)
+
 	testCases := []struct {
 		len int
 		err bool
@@ -542,7 +567,7 @@ func TestMempoolMaxMsgSize(t *testing.T) {
 			require.NoError(t, err, caseString)
 		} else {
 			require.True(t, len(encoded) > maxMsgSize, caseString)
-			require.Equal(t, err, ErrTxTooLarge, caseString)
+			require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString)
 		}
 	}
 
@@ -565,7 +590,7 @@ func TestMempoolTxsBytes(t *testing.T) {
 	assert.EqualValues(t, 1, mempool.TxsBytes())
 
 	// 3. zero again after tx is removed by Update
-	mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
+	mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
 	assert.EqualValues(t, 0, mempool.TxsBytes())
 
 	// 4. zero after Flush
@@ -602,7 +627,7 @@ func TestMempoolTxsBytes(t *testing.T) {
 	err = appConnCon.Start()
 	require.Nil(t, err)
 	defer appConnCon.Stop()
-	res, err := appConnCon.DeliverTxSync(txBytes)
+	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
 	require.NoError(t, err)
 	require.EqualValues(t, 0, res.Code)
 	res2, err := appConnCon.CommitSync()
@@ -610,7 +635,7 @@
 	require.NotEmpty(t, res2.Data)
 
 	// Pretend like we committed nothing so txBytes gets rechecked and removed.
-	mempool.Update(1, []types.Tx{}, nil, nil)
+	mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
 	assert.EqualValues(t, 0, mempool.TxsBytes())
 }
 
@@ -644,7 +669,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 			tx := txs[int(txNum)]
 
 			// this will err with ErrTxInCache many times ...
-			mempool.CheckTxWithInfo(tx, nil, TxInfo{PeerID: uint16(peerID)})
+			mempool.CheckTxWithInfo(tx, nil, TxInfo{SenderID: uint16(peerID)})
 		}
 	err := mempool.FlushAppConn()
 	require.NoError(t, err)
@@ -673,3 +698,11 @@ func checksumFile(p string, t *testing.T) string {
 	require.Nil(t, err, "expecting successful read of %q", p)
 	return checksumIt(data)
 }
+
+func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
+	responses := make([]*abci.ResponseDeliverTx, 0, n)
+	for i := 0; i < n; i++ {
+		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
+	}
+	return responses
+}
diff --git a/mempool/wire.go b/mempool/codec.go
similarity index 100%
rename from mempool/wire.go
rename to mempool/codec.go
diff --git a/mempool/doc.go b/mempool/doc.go
new file mode 100644
index 000000000..ddd47aa2d
--- /dev/null
+++ b/mempool/doc.go
@@ -0,0 +1,24 @@
+// The mempool pushes new txs onto the proxyAppConn.
+// It gets a stream of (req, res) tuples from the proxy.
+// The mempool stores good txs in a concurrent linked-list.

+// Multiple concurrent go-routines can traverse this linked-list
+// safely by calling .NextWait() on each element.

+// So we have several go-routines:
+// 1. Consensus calling Update() and Reap() synchronously
+// 2. Many mempool reactor's peer routines calling CheckTx()
+// 3. Many mempool reactor's peer routines traversing the txs linked list
+// 4. Another goroutine calling GarbageCollectTxs() periodically

+// To manage these goroutines, there are three methods of locking.
+// 1. Mutations to the linked-list are protected by an internal mtx (CList is goroutine-safe)
+// 2. Mutations to the linked-list elements are atomic
+// 3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx
+
+// Garbage collection of old elements from mempool.txs is handled via
+// the DetachPrev() call, which makes old elements unreachable by the
+// peer broadcastTxRoutine(), so they are garbage collected automatically.
+
+// TODO: Better handle abci client errors. (make it automatically handle connection errors)
+package mempool
diff --git a/mempool/errors.go b/mempool/errors.go
new file mode 100644
index 000000000..c5140bdf0
--- /dev/null
+++ b/mempool/errors.go
@@ -0,0 +1,53 @@
+package mempool
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	// ErrTxInCache is returned to the client if we saw tx earlier
+	ErrTxInCache = errors.New("Tx already exists in cache")
+)
+
+// ErrTxTooLarge means the tx is too big to be sent in a message to other peers
+type ErrTxTooLarge struct {
+	max    int
+	actual int
+}
+
+func (e ErrTxTooLarge) Error() string {
+	return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.max, e.actual)
+}
+
+// ErrMempoolIsFull means Tendermint & an application can't handle that much load
+type ErrMempoolIsFull struct {
+	numTxs int
+	maxTxs int
+
+	txsBytes    int64
+	maxTxsBytes int64
+}
+
+func (e ErrMempoolIsFull) Error() string {
+	return fmt.Sprintf(
+		"mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
+		e.numTxs, e.maxTxs,
+		e.txsBytes, e.maxTxsBytes)
+}
+
+// ErrPreCheck is returned when a transaction fails the pre-check (e.g. it is too big)
+type ErrPreCheck struct {
+	Reason error
+}
+
+func (e ErrPreCheck) Error() string {
+	return e.Reason.Error()
+}
+
+// IsPreCheckError returns true if err is due to pre check failure.
+func IsPreCheckError(err error) bool {
+	_, ok := err.(ErrPreCheck)
+	return ok
+}
diff --git a/mempool/mempool.go b/mempool/mempool.go
index df7bad082..6452d83f1 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -1,113 +1,105 @@
 package mempool
 
 import (
-	"bytes"
-	"container/list"
-	"crypto/sha256"
 	"fmt"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/pkg/errors"
 
 	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
-	auto "github.com/tendermint/tendermint/libs/autofile"
-	"github.com/tendermint/tendermint/libs/clist"
-	cmn "github.com/tendermint/tendermint/libs/common"
-	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
 )
 
-// PreCheckFunc is an optional filter executed before CheckTx and rejects
-// transaction if false is returned. An example would be to ensure that a
-// transaction doesn't exceeded the block size.
-type PreCheckFunc func(types.Tx) error
+// Mempool defines the mempool interface.
+//
+// Updates to the mempool need to be synchronized with committing a block so
+// apps can reset their transient state on Commit.
+type Mempool interface {
+	// CheckTx executes a new transaction against the application to determine
+	// its validity and whether it should be added to the mempool.
+	CheckTx(tx types.Tx, callback func(*abci.Response)) error
 
-// PostCheckFunc is an optional filter executed after CheckTx and rejects
-// transaction if false is returned. An example would be to ensure a
-// transaction doesn't require more gas than available for the block.
-type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
+	// CheckTxWithInfo performs the same operation as CheckTx, but with extra
+	// metadata about the tx.
+	// Currently this metadata is the peer who sent it, used to prevent the tx
+	// from being gossiped back to them.
+ CheckTxWithInfo(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error -// TxInfo are parameters that get passed when attempting to add a tx to the -// mempool. -type TxInfo struct { - // We don't use p2p.ID here because it's too big. The gain is to store max 2 - // bytes with each tx to identify the sender rather than 20 bytes. - PeerID uint16 - // whether the tx comes from a persistent peer. - FromPersistent bool -} + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes + // bytes total with the condition that the total gasWanted must be less than + // maxGas. + // If both maxes are negative, there is no cap on the size of all returned + // transactions (~ all available transactions). + ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs -/* + // ReapMaxTxs reaps up to max transactions from the mempool. + // If max is negative, there is no cap on the size of all returned + // transactions (~ all available transactions). + ReapMaxTxs(max int) types.Txs -The mempool pushes new txs onto the proxyAppConn. -It gets a stream of (req, res) tuples from the proxy. -The mempool stores good txs in a concurrent linked-list. + // Lock locks the mempool. The consensus must be able to hold lock to safely update. + Lock() -Multiple concurrent go-routines can traverse this linked-list -safely by calling .NextWait() on each element. + // Unlock unlocks the mempool. + Unlock() -So we have several go-routines: -1. Consensus calling Update() and Reap() synchronously -2. Many mempool reactor's peer routines calling CheckTx() -3. Many mempool reactor's peer routines traversing the txs linked list -4. Another goroutine calling GarbageCollectTxs() periodically + // Update informs the mempool that the given txs were committed and can be discarded. + // NOTE: this should be called *after* block is committed by consensus. + // NOTE: unsafe; Lock/Unlock must be managed by caller + Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, newPreFn PreCheckFunc, newPostFn PostCheckFunc) error -To manage these goroutines, there are three methods of locking. -1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe) -2. Mutations to the linked-list elements are atomic -3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx + // FlushAppConn flushes the mempool connection to ensure async reqResCb calls are + // done. E.g. from CheckTx. + FlushAppConn() error -Garbage collection of old elements from mempool.txs is handlde via -the DetachPrev() call, which makes old elements not reachable by -peer broadcastTxRoutine() automatically garbage collected. + // Flush removes all transactions from the mempool and cache + Flush() -TODO: Better handle abci client errors. (make it automatically handle connection errors) + // TxsAvailable returns a channel which fires once for every height, + // and only when transactions are available in the mempool. + // NOTE: the returned channel may be nil if EnableTxsAvailable was not called. + TxsAvailable() <-chan struct{} -*/ + // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will + // trigger once every height when transactions are available. + EnableTxsAvailable() -var ( - // ErrTxInCache is returned to the client if we saw tx earlier - ErrTxInCache = errors.New("Tx already exists in cache") + // Size returns the number of transactions in the mempool. 
+ Size() int - // ErrTxTooLarge means the tx is too big to be sent in a message to other peers - ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize) -) + // TxsBytes returns the total size of all txs in the mempool. + TxsBytes() int64 - // ErrMempoolIsFull means Tendermint & an application can't handle that much load -type ErrMempoolIsFull struct { - numTxs int - maxTxs int + // InitWAL creates a directory for the WAL file and opens a file itself. + InitWAL() - txsBytes int64 - maxTxsBytes int64 + // CloseWAL closes and discards the underlying WAL file. + // Any further writes will not be relayed to disk. + CloseWAL() } -func (e ErrMempoolIsFull) Error() string { - return fmt.Sprintf( - "Mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)", - e.numTxs, e.maxTxs, - e.txsBytes, e.maxTxsBytes) -} +//-------------------------------------------------------------------------------- -// ErrPreCheck is returned when tx is too big -type ErrPreCheck struct { - Reason error -} +// PreCheckFunc is an optional filter executed before CheckTx and rejects +// the transaction if an error is returned. An example would be to ensure that a +// transaction doesn't exceed the block size. +type PreCheckFunc func(types.Tx) error -func (e ErrPreCheck) Error() string { - return e.Reason.Error() -} +// PostCheckFunc is an optional filter executed after CheckTx and rejects +// the transaction if an error is returned. An example would be to ensure a +// transaction doesn't require more gas than available for the block. +type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error -// IsPreCheckError returns true if err is due to pre check failure. -func IsPreCheckError(err error) bool { - _, ok := err.(ErrPreCheck) - return ok +// TxInfo are parameters that get passed when attempting to add a tx to the +// mempool. +type TxInfo struct { + // We don't use p2p.ID here because it's too big. The gain is to store max 2 + // bytes with each tx to identify the sender rather than 20 bytes. + SenderID uint16 + + // whether the tx comes from a persistent peer. + FromPersistent bool } +//-------------------------------------------------------------------------------- + // PreCheckAminoMaxBytes checks that the size of the transaction plus the amino // overhead is smaller or equal to the expected maxBytes. func PreCheckAminoMaxBytes(maxBytes int64) PreCheckFunc { @@ -145,737 +137,3 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { return nil } } - -// TxID is the hex encoded hash of the bytes as a types.Tx. -func TxID(tx []byte) string { - return fmt.Sprintf("%X", types.Tx(tx).Hash()) -} - -// txKey is the fixed length array sha256 hash used as the key in maps. -func txKey(tx types.Tx) [sha256.Size]byte { - return sha256.Sum256(tx) -} - -// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus -// round. Transaction validity is checked using the CheckTx abci message before the transaction is -// added to the pool. The Mempool uses a concurrent list structure for storing transactions that -// can be efficiently accessed by multiple concurrent readers. -type Mempool struct { - config *cfg.MempoolConfig - - proxyLowMtx sync.Mutex - proxyNextMtx sync.Mutex - proxyBlockingMtx sync.Mutex - proxyAppConn proxy.AppConnMempool - txs *clist.CList // concurrent linked-list of good txs - preCheck PreCheckFunc - postCheck PostCheckFunc - - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated - // in serial (ie.
by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool - txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - - // Map for quick access to txs to record sender in CheckTx. - // txsMap: txKey -> CElement - txsMap sync.Map - - // Atomic integers - height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() - txsBytes int64 // total size of mempool, in bytes - - // Keep a cache of already-seen txs. - // This reduces the pressure on the proxyApp. - cache txCache - - // A log of mempool txs - wal *auto.AutoFile - - logger log.Logger - - metrics *Metrics -} - -// MempoolOption sets an optional parameter on the Mempool. -type MempoolOption func(*Mempool) - -// NewMempool returns a new Mempool with the given configuration and connection to an application. -func NewMempool( - config *cfg.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...MempoolOption, -) *Mempool { - mempool := &Mempool{ - config: config, - proxyAppConn: proxyAppConn, - txs: clist.New(), - height: height, - rechecking: 0, - recheckCursor: nil, - recheckEnd: nil, - logger: log.NewNopLogger(), - metrics: NopMetrics(), - } - if config.CacheSize > 0 { - mempool.cache = newMapTxCache(config.CacheSize) - } else { - mempool.cache = nopTxCache{} - } - proxyAppConn.SetResponseCallback(mempool.globalCb) - for _, option := range options { - option(mempool) - } - return mempool -} - -// EnableTxsAvailable initializes the TxsAvailable channel, -// ensuring it will trigger once every height when transactions are available. -// NOTE: not thread safe - should only be called once, on startup -func (mem *Mempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan struct{}, 1) -} - -// SetLogger sets the Logger. -func (mem *Mempool) SetLogger(l log.Logger) { - mem.logger = l -} - -// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran before CheckTx. -func WithPreCheck(f PreCheckFunc) MempoolOption { - return func(mem *Mempool) { mem.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran after CheckTx. -func WithPostCheck(f PostCheckFunc) MempoolOption { - return func(mem *Mempool) { mem.postCheck = f } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) MempoolOption { - return func(mem *Mempool) { mem.metrics = metrics } -} - -// InitWAL creates a directory for the WAL file and opens a file itself. -// -// *panics* if can't create directory or open file. -// *not thread safe* -func (mem *Mempool) InitWAL() { - walDir := mem.config.WalDir() - err := cmn.EnsureDir(walDir, 0700) - if err != nil { - panic(errors.Wrap(err, "Error ensuring Mempool WAL dir")) - } - af, err := auto.OpenAutoFile(walDir + "/wal") - if err != nil { - panic(errors.Wrap(err, "Error opening Mempool WAL file")) - } - mem.wal = af -} - -// CloseWAL closes and discards the underlying WAL file. -// Any further writes will not be relayed to disk. -func (mem *Mempool) CloseWAL() { - mem.Lock() - defer mem.Unlock() - - if err := mem.wal.Close(); err != nil { - mem.logger.Error("Error closing WAL", "err", err) - } - mem.wal = nil -} - -// Lock locks the mempool. The consensus must be able to hold lock to safely update. 
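The constructor and functional options removed here move onto the concrete CListMempool type; node.go later in this diff calls mempl.NewCListMempool with the same shape. Assuming the option names survive the move unchanged, a wiring fragment looks like this (config, proxyApp, state, memplMetrics, maxBlockBytes, maxBlockGas and logger come from the caller):

    // Fragment: building the concrete mempool with the functional options above.
    mempool := NewCListMempool(
        config.Mempool,        // *cfg.MempoolConfig
        proxyApp.Mempool(),    // proxy.AppConnMempool
        state.LastBlockHeight, // initial height
        WithPreCheck(PreCheckAminoMaxBytes(maxBlockBytes)),
        WithPostCheck(PostCheckMaxGas(maxBlockGas)),
        WithMetrics(memplMetrics),
    )
    mempool.SetLogger(logger.With("module", "mempool"))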
-func (mem *Mempool) Lock() { - mem.proxyNextMtx.Lock() - mem.proxyBlockingMtx.Lock() - mem.proxyNextMtx.Unlock() -} - -// Unlock unlocks the mempool. -func (mem *Mempool) Unlock() { - mem.proxyBlockingMtx.Unlock() -} - -//LockLow uses triple mutex to low the priority of CheckTx() -func (mem *Mempool) LockLow() { - mem.proxyLowMtx.Lock() - mem.proxyNextMtx.Lock() - mem.proxyBlockingMtx.Lock() - mem.proxyNextMtx.Unlock() -} - -func (mem *Mempool) UnlockLow() { - mem.proxyBlockingMtx.Unlock() - mem.proxyLowMtx.Unlock() -} - -// Size returns the number of transactions in the mempool. -func (mem *Mempool) Size() int { - return mem.txs.Len() -} - -// TxsBytes returns the total size of all txs in the mempool. -func (mem *Mempool) TxsBytes() int64 { - return atomic.LoadInt64(&mem.txsBytes) -} - -// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are -// done. E.g. from CheckTx. -func (mem *Mempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync() -} - -// Flush removes all transactions from the mempool and cache -func (mem *Mempool) Flush() { - mem.Lock() - defer mem.Unlock() - - mem.cache.Reset() - - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } - - mem.txsMap = sync.Map{} - _ = atomic.SwapInt64(&mem.txsBytes, 0) -} - -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -func (mem *Mempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -func (mem *Mempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - -// CheckTx executes a new transaction against the application to determine its validity -// and whether it should be added to the mempool. -// It blocks if we're waiting on Update() or Reap(). -// cb: A callback from the CheckTx command. -// It gets called from another goroutine. -// CONTRACT: Either cb will get called, or err returned. -func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { - return mem.CheckTxWithInfo(tx, cb, TxInfo{PeerID: UnknownPeerID, FromPersistent: false}) -} - -// CheckTxWithInfo performs the same operation as CheckTx, but with extra meta data about the tx. -// Currently this metadata is the peer who sent it, -// used to prevent the tx from being gossiped back to them. -func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) { - mem.LockLow() - // use defer to unlock mutex because application (*local client*) might panic - defer mem.UnlockLow() - - var ( - memSize = mem.Size() - txsBytes = mem.TxsBytes() - ) - if memSize >= mem.config.Size || - int64(len(tx))+txsBytes > mem.config.MaxTxsBytes { - return ErrMempoolIsFull{ - memSize, mem.config.Size, - txsBytes, mem.config.MaxTxsBytes} - } - - // The size of the corresponding amino-encoded TxMessage - // can't be larger than the maxMsgSize, otherwise we can't - // relay it to peers. - if len(tx) > maxTxSize { - return ErrTxTooLarge - } - - if mem.preCheck != nil { - if err := mem.preCheck(tx); err != nil { - return ErrPreCheck{err} - } - } - - // CACHE - if !mem.cache.Push(tx) { - // Record a new sender for a tx we've already seen. - // Note it's possible a tx is still in the cache but no longer in the mempool - // (eg. 
after committing a block, txs are removed from mempool but not cache), - // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(txKey(tx)); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded { - // TODO: consider punishing peer for dups, - // its non-trivial since invalid txs can become valid, - // but they can spam the same tx with little cost to them atm. - } - } - return ErrTxInCache - } - // END CACHE - - // WAL - if mem.wal != nil { - // TODO: Notify administrators when WAL fails - _, err := mem.wal.Write([]byte(tx)) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - _, err = mem.wal.Write([]byte("\n")) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - } - // END WAL - - // NOTE: proxyAppConn may error if tx buffer is full - if err = mem.proxyAppConn.Error(); err != nil { - return err - } - - reqRes := mem.proxyAppConn.CheckTxAsync(tx) - reqRes.SetCallback(mem.reqResCb(tx, txInfo, cb)) - - return nil -} - -// Global callback that will be called after every ABCI response. -// Having a single global callback avoids needing to set a callback for each request. -// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), -// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that -// include this information. If we're not in the midst of a recheck, this function will just return, -// so the request specific callback can do the work. -// When rechecking, we don't need the peerID, so the recheck callback happens here. -func (mem *Mempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - return - } - - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) -} - -// Request specific callback that should be set on individual reqRes objects -// to incorporate local information when processing the response. -// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. -// NOTE: alternatively, we could include this information in the ABCI request itself. -// -// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called -// when all other response processing is complete. -// -// Used in CheckTxWithInfo to record PeerID who sent us the tx. -func (mem *Mempool) reqResCb(tx []byte, txInfo TxInfo, externalCb func(*abci.Response)) func(res *abci.Response) { - return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") - } - - mem.resCbFirstTime(tx, txInfo, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - // passed in by the caller of CheckTx, eg. 
the RPC - if externalCb != nil { - externalCb(res) - } - } -} - -// Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *Mempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(txKey(memTx.tx), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) -} - -// Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated -func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(txKey(tx)) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - - if removeFromCache { - mem.cache.Remove(tx) - } -} - -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. -func (mem *Mempool) resCbFirstTime(tx []byte, txInfo TxInfo, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - memTx := &mempoolTx{ - fromPersistent: txInfo.FromPersistent, - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - } - memTx.senders.Store(txInfo.PeerID, true) - mem.addTx(memTx) - mem.logger.Info("Added good transaction", - "tx", TxID(tx), - "res", r, - "height", memTx.height, - "total", mem.Size(), - "fromPersistent", memTx.fromPersistent, - ) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r, "err", postCheckErr) - mem.metrics.FailedTxs.Add(1) - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - default: - // ignore other messages - } -} - -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. -func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(tx, memTx.tx) { - panic(fmt.Sprintf( - "Unexpected tx response from proxy during recheck\nExpected %X, got %X", - memTx.tx, - tx)) - } - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr) - // NOTE: we remove tx from the cache because it might be good later - mem.removeTx(tx, mem.recheckCursor, true) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! - atomic.StoreInt32(&mem.rechecking, 0) - mem.logger.Info("Done rechecking txs") - - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() - } - } - default: - // ignore other messages - } -} - -// TxsAvailable returns a channel which fires once for every height, -// and only when transactions are available in the mempool. 
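Both callbacks run mem.postCheck once the ABCI response arrives, so an application-supplied filter sees every CheckTx result, first-time and recheck alike. A hypothetical PostCheckFunc, shown only to illustrate the hook and not part of this diff:

    // Hypothetical post-check filter: reject txs that declare no gas.
    func PostCheckRequireGas() PostCheckFunc {
        return func(tx types.Tx, res *abci.ResponseCheckTx) error {
            if res.GasWanted <= 0 {
                return fmt.Errorf("tx %X declares no gas (gasWanted=%d)",
                    tx.Hash(), res.GasWanted)
            }
            return nil
        }
    }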
-// NOTE: the returned channel may be nil if EnableTxsAvailable was not called. -func (mem *Mempool) TxsAvailable() <-chan struct{} { - return mem.txsAvailable -} - -func (mem *Mempool) notifyTxsAvailable() { - if mem.Size() == 0 { - panic("notified txs available but mempool is empty!") - } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { - // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true - select { - case mem.txsAvailable <- struct{}{}: - default: - } - } -} - -// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes bytes total -// with the condition that the total gasWanted must be less than maxGas. -// If both maxes are negative, there is no cap on the size of all returned -// transactions (~ all available transactions). -func (mem *Mempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - mem.Lock() - defer mem.Unlock() - - for atomic.LoadInt32(&mem.rechecking) > 0 { - // TODO: Something better? - time.Sleep(time.Millisecond * 10) - } - - var totalBytes int64 - var totalGas int64 - // TODO: we will get a performance boost if we have a good estimate of avg - // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - // Check total size requirement - aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1) - if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes { - return txs - } - totalBytes += int64(len(memTx.tx)) + aminoOverhead - // Check total gas requirement. - // If maxGas is negative, skip this check. - // Since newTotalGas < masGas, which - // must be non-negative, it follows that this won't overflow. - newTotalGas := totalGas + memTx.gasWanted - if maxGas > -1 && newTotalGas > maxGas { - return txs - } - totalGas = newTotalGas - txs = append(txs, memTx.tx) - } - return txs -} - -// ReapMaxTxs reaps up to max transactions from the mempool. -// If max is negative, there is no cap on the size of all returned -// transactions (~ all available transactions). -func (mem *Mempool) ReapMaxTxs(max int) types.Txs { - mem.Lock() - defer mem.Unlock() - - if max < 0 { - max = mem.txs.Len() - } - - for atomic.LoadInt32(&mem.rechecking) > 0 { - // TODO: Something better? - time.Sleep(time.Millisecond * 10) - } - - txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) - } - return txs -} - -// Update informs the mempool that the given txs were committed and can be discarded. -// NOTE: this should be called *after* block is committed by consensus. -// NOTE: unsafe; Lock/Unlock must be managed by caller -func (mem *Mempool) Update( - height int64, - txs types.Txs, - preCheck PreCheckFunc, - postCheck PostCheckFunc, -) error { - // Set height - mem.height = height - mem.notifiedTxsAvailable = false - - if preCheck != nil { - mem.preCheck = preCheck - } - if postCheck != nil { - mem.postCheck = postCheck - } - - // Add committed transactions to cache (if missing). - for _, tx := range txs { - _ = mem.cache.Push(tx) - } - - // Remove committed transactions. - txsLeft := mem.removeTxs(txs) - - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. 
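Since notifyTxsAvailable sends on a channel with capacity 1 and notifiedTxsAvailable is only reset by Update, a consumer gets at most one signal per height, never one per tx. The intended consumption pattern, roughly (proposeBlock and the two limits are illustrative):

    // Fragment: consuming the once-per-height availability signal.
    mem.EnableTxsAvailable() // must be called once, on startup
    go func() {
        for range mem.TxsAvailable() {
            // At most one fire per height; reap whatever fits the next block.
            txs := mem.ReapMaxBytesMaxGas(maxBlockBytes, maxBlockGas)
            proposeBlock(txs) // illustrative
        }
    }()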
- if len(txsLeft) > 0 { - if mem.config.Recheck { - mem.logger.Info("Recheck txs", "numtxs", len(txsLeft), "height", height) - mem.recheckTxs(txsLeft) - // At this point, mem.txs are being rechecked. - // mem.recheckCursor re-scans mem.txs and possibly removes some txs. - // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. - } else { - mem.notifyTxsAvailable() - } - } - - // Update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - return nil -} - -func (mem *Mempool) removeTxs(txs types.Txs) []types.Tx { - // Build a map for faster lookups. - txsMap := make(map[string]struct{}, len(txs)) - for _, tx := range txs { - txsMap[string(tx)] = struct{}{} - } - - txsLeft := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - // Remove the tx if it's already in a block. - if _, ok := txsMap[string(memTx.tx)]; ok { - // NOTE: we don't remove committed txs from the cache. - mem.removeTx(memTx.tx, e, false) - - continue - } - txsLeft = append(txsLeft, memTx.tx) - } - return txsLeft -} - -// NOTE: pass in txs because mem.txs can mutate concurrently. -func (mem *Mempool) recheckTxs(txs []types.Tx) { - if len(txs) == 0 { - return - } - atomic.StoreInt32(&mem.rechecking, 1) - mem.recheckCursor = mem.txs.Front() - mem.recheckEnd = mem.txs.Back() - - // Push txs to proxyAppConn - // NOTE: globalCb may be called concurrently. - for _, tx := range txs { - mem.proxyAppConn.ReCheckTxAsync(tx) - } - mem.proxyAppConn.FlushAsync() -} - -//-------------------------------------------------------------------------------- - -// mempoolTx is a transaction that successfully ran -type mempoolTx struct { - fromPersistent bool // whether the tx come from a persistent peer - height int64 // height that this tx had been validated in - gasWanted int64 // amount of gas this tx states it will require - tx types.Tx // - - // ids of peers who've sent us this tx (as a map for quick lookups). - // senders: PeerID -> bool - senders sync.Map -} - -// Height returns the height for this transaction -func (memTx *mempoolTx) Height() int64 { - return atomic.LoadInt64(&memTx.height) -} - -//-------------------------------------------------------------------------------- - -type txCache interface { - Reset() - Push(tx types.Tx) bool - Remove(tx types.Tx) -} - -// mapTxCache maintains a LRU cache of transactions. This only stores the hash -// of the tx, due to memory concerns. -type mapTxCache struct { - mtx sync.Mutex - size int - map_ map[[sha256.Size]byte]*list.Element - list *list.List -} - -var _ txCache = (*mapTxCache)(nil) - -// newMapTxCache returns a new mapTxCache. -func newMapTxCache(cacheSize int) *mapTxCache { - return &mapTxCache{ - size: cacheSize, - map_: make(map[[sha256.Size]byte]*list.Element, cacheSize), - list: list.New(), - } -} - -// Reset resets the cache to an empty state. -func (cache *mapTxCache) Reset() { - cache.mtx.Lock() - cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size) - cache.list.Init() - cache.mtx.Unlock() -} - -// Push adds the given tx to the cache and returns true. It returns -// false if tx is already in the cache. 
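mapTxCache keys entries on the sha256 of the tx; Push refreshes an existing entry by moving it to the back of the list, and eviction pops the front, i.e. the least recently pushed hash. Spelled out against the code above (txs illustrative):

    // The eviction behavior of mapTxCache, step by step.
    cache := newMapTxCache(2)
    cache.Push(types.Tx("tx1")) // true: newly cached
    cache.Push(types.Tx("tx1")) // false: duplicate; entry refreshed to the back
    cache.Push(types.Tx("tx2")) // true: cache now holds [tx1, tx2]
    cache.Push(types.Tx("tx3")) // true: cache full, front entry (tx1) evicted
    cache.Push(types.Tx("tx1")) // true again: tx1 was evicted one line above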
-func (cache *mapTxCache) Push(tx types.Tx) bool { - cache.mtx.Lock() - defer cache.mtx.Unlock() - - // Use the tx hash in the cache - txHash := txKey(tx) - if moved, exists := cache.map_[txHash]; exists { - cache.list.MoveToBack(moved) - return false - } - - if cache.list.Len() >= cache.size { - popped := cache.list.Front() - poppedTxHash := popped.Value.([sha256.Size]byte) - delete(cache.map_, poppedTxHash) - if popped != nil { - cache.list.Remove(popped) - } - } - e := cache.list.PushBack(txHash) - cache.map_[txHash] = e - return true -} - -// Remove removes the given tx from the cache. -func (cache *mapTxCache) Remove(tx types.Tx) { - cache.mtx.Lock() - txHash := txKey(tx) - popped := cache.map_[txHash] - delete(cache.map_, txHash) - if popped != nil { - cache.list.Remove(popped) - } - - cache.mtx.Unlock() -} - -type nopTxCache struct{} - -var _ txCache = (*nopTxCache)(nil) - -func (nopTxCache) Reset() {} -func (nopTxCache) Push(types.Tx) bool { return true } -func (nopTxCache) Remove(types.Tx) {} diff --git a/mempool/reactor.go b/mempool/reactor.go index 8293f3852..c117353c0 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -18,8 +18,7 @@ import ( const ( MempoolChannel = byte(0x30) - maxMsgSize = 1048576 // 1MB TODO make it configurable - maxTxSize = maxMsgSize - 8 // account for amino overhead of TxMessage + aminoOverheadForTxMessage = 8 MempoolPacketChannelSize = 1024 * 200 // 200K messages can be queued peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount @@ -37,13 +36,13 @@ type MempoolPacket struct { msgBytes []byte } -// MempoolReactor handles mempool tx broadcasting amongst peers. +// Reactor handles mempool tx broadcasting amongst peers. // It maintains a map from peer ID to counter, to prevent gossiping txs to the // peers you received it from. -type MempoolReactor struct { +type Reactor struct { p2p.BaseReactor config *cfg.MempoolConfig - Mempool *Mempool + mempool *CListMempool ids *mempoolIDs recvCh chan *MempoolPacket } @@ -111,26 +110,26 @@ func newMempoolIDs() *mempoolIDs { } } -// NewMempoolReactor returns a new MempoolReactor with the given config and mempool. -func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor { - memR := &MempoolReactor{ +// NewReactor returns a new Reactor with the given config and mempool. +func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor { + memR := &Reactor{ config: config, - Mempool: mempool, recvCh: make(chan *MempoolPacket, MempoolPacketChannelSize), + mempool: mempool, ids: newMempoolIDs(), } - memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR) + memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR) return memR } -// SetLogger sets the Logger on the reactor and the underlying Mempool. -func (memR *MempoolReactor) SetLogger(l log.Logger) { +// SetLogger sets the Logger on the reactor and the underlying mempool. +func (memR *Reactor) SetLogger(l log.Logger) { memR.Logger = l - memR.Mempool.SetLogger(l) + memR.mempool.SetLogger(l) } // OnStart implements p2p.BaseReactor. -func (memR *MempoolReactor) OnStart() error { +func (memR *Reactor) OnStart() error { if !memR.config.Broadcast { memR.Logger.Info("Tx broadcasting is disabled") } @@ -140,13 +139,13 @@ func (memR *MempoolReactor) OnStart() error { // OnStop implements p2p.BaseReactor // Close message queue channel -func (memR *MempoolReactor) OnStop() { +func (memR *Reactor) OnStop() { close(memR.recvCh) } // GetChannels implements Reactor. 
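With the rename, external packages construct the reactor via NewReactor and register it on the p2p switch under the "MEMPOOL" name, as both node.go and reactor_test.go later in this diff do:

    // Wiring fragment mirroring makeAndConnectReactors in reactor_test.go below.
    memR := NewReactor(config.Mempool, mempool) // mempool is a *CListMempool
    memR.SetLogger(logger.With("module", "mempool"))
    sw.AddReactor("MEMPOOL", memR) // sw is a *p2p.Switch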
// It returns the list of channels for this reactor. -func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { +func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ { ID: MempoolChannel, @@ -157,23 +156,23 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor. // It starts a broadcast routine ensuring all txs are forwarded to the given peer. -func (memR *MempoolReactor) AddPeer(peer p2p.Peer) { +func (memR *Reactor) AddPeer(peer p2p.Peer) { memR.ids.ReserveForPeer(peer) go memR.broadcastTxRoutine(peer) } // RemovePeer implements Reactor. -func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { memR.ids.Reclaim(peer) // broadcast routine checks if peer is gone and returns } // Receive implements Reactor. -func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { +func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { memR.recvCh <- &MempoolPacket{chID: chID, src: src, msgBytes: msgBytes} } -func (memR *MempoolReactor) receiveRoutine() { +func (memR *Reactor) receiveRoutine() { memR.Logger.Debug("Starting ReceiveRoutine for mempool") for p := range memR.recvCh { memR.receiveImpl(p.chID, p.src, p.msgBytes) @@ -181,8 +180,8 @@ func (memR *MempoolReactor) receiveRoutine() { } // It adds any received transactions to the mempool. -func (memR *MempoolReactor) receiveImpl(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) +func (memR *Reactor) receiveImpl(chID byte, src p2p.Peer, msgBytes []byte) { + msg, err := memR.decodeMsg(msgBytes) if err != nil { memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) memR.Switch.StopPeerForError(src, err) @@ -193,12 +192,14 @@ func (memR *MempoolReactor) receiveImpl(chID byte, src p2p.Peer, msgBytes []byte switch msg := msg.(type) { case *TxMessage: peerID := memR.ids.GetForPeer(src) - err := memR.Mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{PeerID: peerID, FromPersistent: memR.Switch.IsPersistent(src)}) + err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: peerID, FromPersistent: src.IsPersistent()}) if err != nil { + // txID(msg.Tx) is costly if err == ErrTxInCache { - memR.Mempool.metrics.DuplicateTx.With("peer_id", string(src.ID())).Add(1) + memR.Logger.Debug("Could not check tx", "err", err) + } else { + memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err) } - memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err) } // broadcasting happens from go routines per peer default: @@ -212,8 +213,8 @@ type PeerState interface { } // Send new mempool txs to peer. -func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { - if !memR.config.Broadcast || (memR.config.OnlyToPersistent && !memR.Switch.IsPersistent(peer)) { +func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { + if !memR.config.Broadcast || (memR.config.OnlyToPersistent && !peer.IsPersistent()) { return } @@ -229,8 +230,8 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { // start from the beginning.
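receiveImpl records the sender through TxInfo.SenderID precisely so broadcastTxRoutine can avoid echoing a tx back to the peer it came from. The skip itself sits further down in broadcastTxRoutine, outside this hunk; conceptually it is a lookup in the senders map shown earlier, roughly:

    // Conceptual sketch of the no-echo check inside broadcastTxRoutine;
    // the exact lines are outside this hunk, so treat details as illustrative.
    memTx := next.Value.(*mempoolTx)
    peerID := memR.ids.GetForPeer(peer)
    if _, sentByPeer := memTx.senders.Load(peerID); !sentByPeer {
        // This peer never sent us the tx, so gossiping it back is safe.
        msg := &TxMessage{Tx: memTx.tx}
        peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
    }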
if next == nil { select { - case <-memR.Mempool.TxsWaitChan(): // Wait until a tx is available - if next = memR.Mempool.TxsFront(); next == nil { + case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available + if next = memR.mempool.TxsFront(); next == nil { continue } case <-peer.Quit(): @@ -284,7 +285,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { //----------------------------------------------------------------------------- // Messages -// MempoolMessage is a message sent or received by the MempoolReactor. +// MempoolMessage is a message sent or received by the Reactor. type MempoolMessage interface{} func RegisterMempoolMessages(cdc *amino.Codec) { @@ -292,9 +293,10 @@ func RegisterMempoolMessages(cdc *amino.Codec) { cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil) } -func decodeMsg(bz []byte) (msg MempoolMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize) +func (memR *Reactor) decodeMsg(bz []byte) (msg MempoolMessage, err error) { + maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes) + if l := len(bz); l > maxMsgSize { + return msg, ErrTxTooLarge{maxMsgSize, l} } err = cdc.UnmarshalBinaryBare(bz, &msg) return @@ -311,3 +313,9 @@ type TxMessage struct { func (m *TxMessage) String() string { return fmt.Sprintf("[TxMessage %v]", m.Tx) } + +// calcMaxMsgSize returns the max size of TxMessage +// account for amino overhead of TxMessage +func calcMaxMsgSize(maxTxSize int) int { + return maxTxSize + aminoOverheadForTxMessage +} diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index c9cf49809..dff4c0d68 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -1,7 +1,6 @@ package mempool import ( - "fmt" "net" "sync" "testing" @@ -43,20 +42,20 @@ func mempoolLogger() log.Logger { } // connect N mempool reactors through N switches -func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor { - reactors := make([]*MempoolReactor, N) +func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { + reactors := make([]*Reactor, n) logger := mempoolLogger() - for i := 0; i < N; i++ { + for i := 0; i < n; i++ { app := kvstore.NewKVStoreApplication() cc := proxy.NewLocalClientCreator(app) mempool, cleanup := newMempoolWithApp(cc) defer cleanup() - reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we dont start the consensus states + reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states reactors[i].SetLogger(logger.With("validator", i)) } - p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { + p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch { s.AddReactor("MEMPOOL", reactors[i]) return s @@ -64,13 +63,15 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor return reactors } -// wait for all txs on all reactors -func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) { +func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { // wait for the txs in all mempools wg := new(sync.WaitGroup) - for i := 0; i < len(reactors); i++ { + for i, reactor := range reactors { wg.Add(1) - go _waitForTxs(t, wg, txs, i, reactors) + go func(r *Reactor, reactorIndex int) { + defer wg.Done() + waitForTxsOnReactor(t, txs, r, reactorIndex) + }(reactor, i) } done := make(chan struct{}) @@ -87,25 +88,23 @@ func waitForTxs(t *testing.T, txs types.Txs, reactors 
[]*MempoolReactor) { } } -// wait for all txs on a single mempool -func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int, reactors []*MempoolReactor) { - - mempool := reactors[reactorIdx].Mempool - for mempool.Size() != len(txs) { +func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { + mempool := reactor.mempool + for mempool.Size() < len(txs) { time.Sleep(time.Millisecond * 100) } reapedTxs := mempool.ReapMaxTxs(len(txs)) for i, tx := range txs { - assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i])) + assert.Equalf(t, tx, reapedTxs[i], + "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) } - wg.Done() } // ensure no txs on reactor after some timeout -func ensureNoTxs(t *testing.T, reactor *MempoolReactor, timeout time.Duration) { +func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) { time.Sleep(timeout) // wait for the txs in all mempools - assert.Zero(t, reactor.Mempool.Size()) + assert.Zero(t, reactor.mempool.Size()) } const ( @@ -116,7 +115,7 @@ const ( func TestReactorBroadcastTxMessage(t *testing.T) { config := cfg.TestConfig() const N = 4 - reactors := makeAndConnectMempoolReactors(config, N) + reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { r.Stop() @@ -130,14 +129,14 @@ func TestReactorBroadcastTxMessage(t *testing.T) { // send a bunch of txs to the first reactor's mempool // and wait for them all to be received in the others - txs := checkTxs(t, reactors[0].Mempool, NUM_TXS, UnknownPeerID) - waitForTxs(t, txs, reactors) + txs := checkTxs(t, reactors[0].mempool, NUM_TXS, UnknownPeerID) + waitForTxsOnReactors(t, txs, reactors) } func TestReactorNoBroadcastToSender(t *testing.T) { config := cfg.TestConfig() const N = 2 - reactors := makeAndConnectMempoolReactors(config, N) + reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { r.Stop() @@ -146,7 +145,7 @@ func TestReactorNoBroadcastToSender(t *testing.T) { // send a bunch of txs to the first reactor's mempool, claiming it came from peer // ensure peer gets no txs - checkTxs(t, reactors[0].Mempool, NUM_TXS, 1) + checkTxs(t, reactors[0].mempool, NUM_TXS, 1) ensureNoTxs(t, reactors[1], 100*time.Millisecond) } @@ -157,7 +156,7 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { config := cfg.TestConfig() const N = 2 - reactors := makeAndConnectMempoolReactors(config, N) + reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { r.Stop() @@ -180,7 +179,7 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) { config := cfg.TestConfig() const N = 2 - reactors := makeAndConnectMempoolReactors(config, N) + reactors := makeAndConnectReactors(config, N) // stop reactors for _, r := range reactors { diff --git a/mock/mempool.go b/mock/mempool.go new file mode 100644 index 000000000..cebe156ba --- /dev/null +++ b/mock/mempool.go @@ -0,0 +1,46 @@ +package mock + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/clist" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +// Mempool is an empty implementation of a Mempool, useful for testing. 
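The no-op implementation that follows satisfies every method of the new mempl.Mempool interface, so components needing only a mempool dependency can be tested in isolation; a hypothetical test fragment:

    // Hypothetical use of the mock defined below.
    var mem mempl.Mempool = mock.Mempool{}

    if err := mem.CheckTx(types.Tx("any"), nil); err != nil {
        panic(err) // unreachable: the mock always returns nil
    }
    fmt.Println(mem.Size())         // always 0
    fmt.Println(mem.ReapMaxTxs(10)) // always empty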
+type Mempool struct{} + +var _ mempl.Mempool = Mempool{} + +func (Mempool) Lock() {} +func (Mempool) Unlock() {} +func (Mempool) Size() int { return 0 } +func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error { + return nil +} +func (Mempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response), + _ mempl.TxInfo) error { + return nil +} +func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } +func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (Mempool) Update( + _ int64, + _ types.Txs, + _ []*abci.ResponseDeliverTx, + _ mempl.PreCheckFunc, + _ mempl.PostCheckFunc, +) error { + return nil +} +func (Mempool) Flush() {} +func (Mempool) FlushAppConn() error { return nil } +func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (Mempool) EnableTxsAvailable() {} +func (Mempool) TxsBytes() int64 { return 0 } + +func (Mempool) TxsFront() *clist.CElement { return nil } +func (Mempool) TxsWaitChan() <-chan struct{} { return nil } + +func (Mempool) InitWAL() {} +func (Mempool) CloseWAL() {} diff --git a/networks/local/localnode/Dockerfile b/networks/local/localnode/Dockerfile index 3942cecd6..03af5aa3c 100644 --- a/networks/local/localnode/Dockerfile +++ b/networks/local/localnode/Dockerfile @@ -13,4 +13,4 @@ CMD ["node", "--proxy_app", "kvstore"] STOPSIGNAL SIGTERM COPY wrapper.sh /usr/bin/wrapper.sh - +COPY config-template.toml /etc/tendermint/config-template.toml diff --git a/networks/local/localnode/config-template.toml b/networks/local/localnode/config-template.toml new file mode 100644 index 000000000..a90eb7bd5 --- /dev/null +++ b/networks/local/localnode/config-template.toml @@ -0,0 +1,2 @@ +[rpc] +laddr = "tcp://0.0.0.0:26657" diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh index 8150aad48..c2d7c3a36 100644 --- a/networks/remote/integration.sh +++ b/networks/remote/integration.sh @@ -30,9 +30,7 @@ go get $REPO cd $GOPATH/src/$REPO ## build -git checkout zach/ansible make get_tools -make get_vendor_deps make build # generate an ssh key @@ -84,8 +82,11 @@ ip3=$(strip $ip3) # all the ansible commands are also directory specific cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible +# create config dirs +tendermint testnet + ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml -ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples +ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet sleep 10 diff --git a/node/wire.go b/node/codec.go similarity index 100% rename from node/wire.go rename to node/codec.go diff --git a/node/doc.go b/node/doc.go new file mode 100644 index 000000000..08f3fa258 --- /dev/null +++ b/node/doc.go @@ -0,0 +1,40 @@ +/* +Package node is the main entry point, where the Node struct, which +represents a full node, is defined. 
+ +Adding new p2p.Reactor(s) + +To add a new p2p.Reactor, use the CustomReactors option: + + node, err := NewNode( + config, + privVal, + nodeKey, + clientCreator, + genesisDocProvider, + dbProvider, + metricsProvider, + logger, + CustomReactors(map[string]p2p.Reactor{"CUSTOM": customReactor}), + ) + +Replacing existing p2p.Reactor(s) + +To replace the built-in p2p.Reactor, use the CustomReactors option: + + node, err := NewNode( + config, + privVal, + nodeKey, + clientCreator, + genesisDocProvider, + dbProvider, + metricsProvider, + logger, + CustomReactors(map[string]p2p.Reactor{"BLOCKCHAIN": customBlockchainReactor}), + ) + +The list of existing reactors can be found in CustomReactors documentation. + +*/ +package node diff --git a/node/node.go b/node/node.go index 2a5371f50..4f3fb68fa 100644 --- a/node/node.go +++ b/node/node.go @@ -18,11 +18,13 @@ import ( "github.com/tendermint/go-amino" abci "github.com/tendermint/tendermint/abci/types" - bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/blockchain/hot" + bcv0 "github.com/tendermint/tendermint/blockchain/v0" + bcv1 "github.com/tendermint/tendermint/blockchain/v1" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/consensus" cs "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/evidence" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" @@ -46,6 +48,7 @@ import ( "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" @@ -102,7 +105,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { if _, err := os.Stat(oldPrivVal); !os.IsNotExist(err) { oldPV, err := privval.LoadOldFilePV(oldPrivVal) if err != nil { - return nil, fmt.Errorf("Error reading OldPrivValidator from %v: %v\n", oldPrivVal, err) + return nil, fmt.Errorf("error reading OldPrivValidator from %v: %v\n", oldPrivVal, err) } logger.Info("Upgrading PrivValidator file", "old", oldPrivVal, @@ -141,6 +144,33 @@ func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { } } +// Option sets a parameter for the node. +type Option func(*Node) + +// CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to +// the node's Switch. +// +// WARNING: using any name from the below list of the existing reactors will +// result in replacing it with the custom one. +// +// - MEMPOOL +// - BLOCKCHAIN +// - CONSENSUS +// - EVIDENCE +// - PEX +func CustomReactors(reactors map[string]p2p.Reactor) Option { + return func(n *Node) { + for name, reactor := range reactors { + if existingReactor := n.sw.Reactor(name); existingReactor != nil { + n.sw.Logger.Info("Replacing existing reactor with a custom one", + "name", name, "existing", existingReactor, "custom", reactor) + n.sw.RemoveReactor(name, existingReactor) + } + n.sw.AddReactor(name, reactor) + } + } +} + //------------------------------------------------------------------------------ // Node is the highest level interface to a full Tendermint node. 
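CustomReactors only swaps entries in the Switch's reactor table, so any p2p.Reactor implementation can be plugged in. A minimal skeleton one could pass to it; the EchoReactor name, the 0x50 channel ID, and the echo behavior are placeholders, not part of this diff:

    // Placeholder reactor suitable for CustomReactors(map[string]p2p.Reactor{...}).
    type EchoReactor struct {
        p2p.BaseReactor
    }

    func NewEchoReactor() *EchoReactor {
        r := &EchoReactor{}
        r.BaseReactor = *p2p.NewBaseReactor("EchoReactor", r)
        return r
    }

    func (r *EchoReactor) GetChannels() []*p2p.ChannelDescriptor {
        return []*p2p.ChannelDescriptor{{ID: byte(0x50), Priority: 1}}
    }

    func (r *EchoReactor) AddPeer(peer p2p.Peer)                        {}
    func (r *EchoReactor) RemovePeer(peer p2p.Peer, reason interface{}) {}
    func (r *EchoReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
        src.Send(chID, msgBytes) // echo back, purely for illustration
    }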
@@ -164,96 +194,70 @@ type Node struct { // services eventBus *types.EventBus // pub/sub for services stateDB dbm.DB - blockStore *bc.BlockStore // store the blockchain to disk - bcReactor *bc.BlockchainReactor // for fast-syncing - mempoolReactor *mempl.MempoolReactor // for gossipping transactions + blockStore *store.BlockStore // store the blockchain to disk + bcReactor p2p.Reactor // for fast-syncing + mempoolReactor *mempl.Reactor // for gossipping transactions + mempool mempl.Mempool consensusState *cs.ConsensusState // latest consensus state consensusReactor *cs.ConsensusReactor // for participating in the consensus + pexReactor *pex.PEXReactor // for exchanging peer addresses evidencePool *evidence.EvidencePool // tracking evidence proxyApp proxy.AppConns // connection to the application rpcListeners []net.Listener // rpc servers txIndexer txindex.TxIndexer - blockIndexer blockindex.BlockIndexer indexerService *txindex.IndexerService + prometheusSrv *http.Server + blockIndexer blockindex.BlockIndexer blockIndexService *blockindex.IndexerService indexHub *sm.IndexHub - prometheusSrv *http.Server } -// NewNode returns a new, ready to go, Tendermint Node. -func NewNode(config *cfg.Config, - privValidator types.PrivValidator, - nodeKey *p2p.NodeKey, - clientCreator proxy.ClientCreator, - genesisDocProvider GenesisDocProvider, - dbProvider DBProvider, - metricsProvider MetricsProvider, - logger log.Logger) (*Node, error) { - - // Get BlockStore - blockStoreDB, err := dbProvider(&DBContext{"blockstore", config}) +func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { + var blockStoreDB dbm.DB + blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) if err != nil { - return nil, err + return } - blockStore := bc.NewBlockStore(blockStoreDB) + blockStore = store.NewBlockStore(blockStoreDB) - // Get State - stateDB, err := dbProvider(&DBContext{"state", config}) - if err != nil { - return nil, err - } + stateDB, err = dbProvider(&DBContext{"state", config}) - // Get genesis doc - // TODO: move to state package? - genDoc, err := loadGenesisDoc(stateDB) - if err != nil { - genDoc, err = genesisDocProvider() - if err != nil { - return nil, err - } - // save genesis doc to prevent a certain class of user errors (e.g. when it - // was changed, accidentally or not). Also good for audit trail. - saveGenesisDoc(stateDB, genDoc) - } - - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - if err != nil { - return nil, err - } + return +} - // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). +func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) { proxyApp := proxy.NewAppConns(clientCreator) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { - return nil, fmt.Errorf("Error starting proxy app connections: %v", err) + return nil, fmt.Errorf("error starting proxy app connections: %v", err) } + return proxyApp, nil +} - // EventBus and IndexerService must be started before the handshake because - // we might need to index the txs of the replayed block as this might not have happened - // when the node stopped last time (i.e. 
the node stopped after it saved the block - // but before it indexed the txs, or, endblocker panicked) +func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { eventBus := types.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) - - err = eventBus.Start() - if err != nil { + if err := eventBus.Start(); err != nil { return nil, err } + return eventBus, nil +} - // Transaction indexing - var txIndexer txindex.TxIndexer - var txDB dbm.DB // TODO: remove by refactor defaultdbprovider to cache the created db instaces +func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider, + eventBus *types.EventBus, lastBlockHeight int64, stateDB dbm.DB, blockStore sm.BlockStore, metrics *sm.Metrics, logger log.Logger) (txDB dbm.DB, txIndexerService *txindex.IndexerService, txIndexer txindex.TxIndexer, blockIndexerService *blockindex.IndexerService, blockIndexer blockindex.BlockIndexer, indexHub *sm.IndexHub, err error) { switch config.TxIndex.Indexer { case "kv": - store, err := dbProvider(&DBContext{"tx_index", config}) + var store dbm.DB + store, err = dbProvider(&DBContext{"tx_index", config}) txDB = store if err != nil { - return nil, err + return } indexOptions := make([]func(index *kv.TxIndex), 0) - if config.TxIndex.IndexTags != "" { + switch { + case config.TxIndex.IndexTags != "": indexOptions = append(indexOptions, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " "))) - } else if config.TxIndex.IndexAllTags { + case config.TxIndex.IndexAllTags: indexOptions = append(indexOptions, kv.IndexAllTags()) } if config.TxIndex.EnableRangeQuery { @@ -264,60 +268,56 @@ func NewNode(config *cfg.Config, txIndexer = &null.TxIndex{} } - txIndexerService := txindex.NewIndexerService(txIndexer, eventBus) + txIndexerService = txindex.NewIndexerService(txIndexer, eventBus) txIndexerService.SetLogger(logger.With("module", "txindex")) err = txIndexerService.Start() if err != nil { - return nil, err + return } // Block indexing - var blockIndexer blockindex.BlockIndexer switch config.BlockIndex.Indexer { case "kv": - store, err := dbProvider(&DBContext{"block_index", config}) + var store dbm.DB + store, err = dbProvider(&DBContext{"block_index", config}) if err != nil { - return nil, err + return } blockIndexer = bkv.NewBlockIndex(store) default: blockIndexer = &nullblk.BlockIndex{} } - blockIndexerService := blockindex.NewIndexerService(blockIndexer, eventBus) + blockIndexerService = blockindex.NewIndexerService(blockIndexer, eventBus) blockIndexerService.SetLogger(logger.With("module", "blockindex")) err = blockIndexerService.Start() if err != nil { - return nil, err + return } - csMetrics, p2pMetrics, memplMetrics, smMetrics, hotMetrics := metricsProvider(genDoc.ChainID) - - indexHub := sm.NewIndexHub(state.LastBlockHeight, stateDB, blockStore, eventBus, sm.IndexHubWithMetrics(smMetrics)) + indexHub = sm.NewIndexHub(lastBlockHeight, stateDB, blockStore, eventBus, sm.IndexHubWithMetrics(metrics)) indexHub.RegisterIndexSvc(blockIndexerService) indexHub.RegisterIndexSvc(txIndexerService) indexHub.SetLogger(logger.With("module", "indexer_hub")) err = indexHub.Start() - if err != nil { - return nil, err - } + return +} - // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, - // and replays any blocks as necessary to sync tendermint with the app. 
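NewNode now composes these helpers, and the ordering constraint in the comment above still holds: the event bus and both indexer services must be running before the handshake so replayed blocks get indexed. A condensed fragment of the resulting sequence (error handling and most return values elided; state, genDoc, smMetrics and the loggers come from earlier steps of NewNode):

    // Condensed fragment of the refactored NewNode body.
    blockStore, stateDB, _ := initDBs(config, dbProvider)
    proxyApp, _ := createAndStartProxyAppConns(clientCreator, logger)
    eventBus, _ := createAndStartEventBus(logger)

    // Indexers must start before the handshake (see the comment above).
    _, _, txIndexer, _, _, indexHub, _ := createAndStartIndexerService(config,
        dbProvider, eventBus, state.LastBlockHeight, stateDB, blockStore,
        smMetrics, logger)

    if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp,
        config.WithAppStat, consensusLogger); err != nil {
        return nil, err
    }
    _, _ = txIndexer, indexHub // both are retained on the Node struct later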
- consensusLogger := logger.With("module", "consensus") - handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc, config.WithAppStat) +func doHandshake(stateDB dbm.DB, state sm.State, blockStore sm.BlockStore, + genDoc *types.GenesisDoc, eventBus *types.EventBus, proxyApp proxy.AppConns, withAppStat bool, consensusLogger log.Logger) error { + + handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc, withAppStat) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) if err := handshaker.Handshake(proxyApp); err != nil { - return nil, fmt.Errorf("Error during handshake: %v", err) + return fmt.Errorf("error during handshake: %v", err) } + return nil +} - // Reload the state. It will have the Version.Consensus.App set by the - // Handshake, and may have other modifications as well (ie. depending on - // what happened during block replay). - state = sm.LoadState(stateDB) - +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, config *cfg.Config, logger, + consensusLogger log.Logger) { // Log the version info. logger.Info("Version info", "software", version.TMCoreSemVer, @@ -333,28 +333,6 @@ func NewNode(config *cfg.Config, ) } - if config.PrivValidatorListenAddr != "" { - // If an address is provided, listen on the socket for a connection from an - // external signing process. - // FIXME: we should start services inside OnStart - privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger) - if err != nil { - return nil, errors.Wrap(err, "Error with private validator socket client") - } - } - - // Decide whether to fast-sync or not - // We don't fast-sync when the only validator is us. - fastSync := config.FastSync - if state.Validators.Size() == 1 { - addr, _ := state.Validators.GetByIndex(0) - privValAddr := privValidator.GetAddress() - if bytes.Equal(privValAddr, addr) { - fastSync = false - } - } - - pubKey := privValidator.GetPubKey() addr := pubKey.Address() // Log whether this node is a validator or an observer if state.Validators.HasAddress(addr) { @@ -362,9 +340,20 @@ func NewNode(config *cfg.Config, } else { consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey) } +} - // Make MempoolReactor - mempool := mempl.NewMempool( +func onlyValidatorIsUs(state sm.State, privVal types.PrivValidator) bool { + if state.Validators.Size() > 1 { + return false + } + addr, _ := state.Validators.GetByIndex(0) + return bytes.Equal(privVal.GetPubKey().Address(), addr) +} + +func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, + state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) { + + mempool := mempl.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, @@ -373,75 +362,62 @@ func NewNode(config *cfg.Config, mempl.WithPostCheck(sm.TxPostCheck(state)), ) mempoolLogger := logger.With("module", "mempool") - mempool.SetLogger(mempoolLogger) - if config.Mempool.WalEnabled() { - mempool.InitWAL() // no need to have the mempool wal during tests - } - mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool) + mempoolReactor := mempl.NewReactor(config.Mempool, mempool) mempoolReactor.SetLogger(mempoolLogger) if config.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } + return mempoolReactor, mempool +} + +func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, + stateDB dbm.DB, logger log.Logger) (*evidence.EvidenceReactor, *evidence.EvidencePool, error) { - // Make 
Evidence Reactor evidenceDB, err := dbProvider(&DBContext{"evidence", config}) if err != nil { - return nil, err + return nil, nil, err } evidenceLogger := logger.With("module", "evidence") evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB) evidencePool.SetLogger(evidenceLogger) evidenceReactor := evidence.NewEvidenceReactor(evidencePool) evidenceReactor.SetLogger(evidenceLogger) + return evidenceReactor, evidencePool, nil +} - if state.Validators.Size() == 1 { - addr, _ := state.Validators.GetByIndex(0) - if bytes.Equal(privValidator.GetAddress(), addr) { - config.StateSyncHeight = -1 - } - } - - var stateReactor *snapshot.StateReactor - if config.StateSyncReactor { - stateSyncLogger := logger.With("module", "statesync") - snapshot.InitSnapshotManager(stateDB, txDB, blockStore, config.DBDir(), stateSyncLogger) - - // !!!This method may change config.StateSyncHeight!!! - // so the later reactor need read config.StateSyncHeight rather than a copied variable - stateReactor = snapshot.NewStateReactor(stateDB, proxyApp.State(), config) - stateReactor.SetLogger(stateSyncLogger) - } else { - config.StateSyncHeight = -1 +func createBlockchainReactor(config *cfg.Config, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore *store.BlockStore, + fastSync bool, + logger log.Logger) (bcReactor p2p.Reactor, err error) { + + switch config.FastSync.Version { + case "v0": + bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync, config.HotSyncReactor, config.HotSync) + case "v1": + bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync,config.HotSyncReactor, config.HotSync) + default: + return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } - blockExecLogger := logger.With("module", "exec") - // make block executor for consensus and blockchain reactors to execute blocks - blockExec := sm.NewBlockExecutor( - stateDB, - blockExecLogger, - proxyApp.Consensus(), - mempool, - evidencePool, - config.WithAppStat, - sm.BlockExecutorWithMetrics(smMetrics), - ) - - // Make BlockchainReactor - bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync && (config.StateSyncHeight < 0), config.HotSyncReactor, config.HotSync) bcReactor.SetLogger(logger.With("module", "blockchain")) + return bcReactor, nil +} - var hotSyncReactor *hot.BlockchainReactor - if config.HotSyncReactor { - hotSyncLogger := logger.With("module", "hotsync") - hotSyncReactor = hot.NewBlockChainReactor(state.Copy(), blockExec, blockStore, config.HotSync, fastSync || config.StateSyncHeight >= 0, config.HotSyncTimeout, hot.WithMetrics(hotMetrics), hot.WithEventBus(eventBus)) - hotSyncReactor.SetLogger(hotSyncLogger) - if privValidator != nil { - hotSyncReactor.SetPrivValidator(privValidator) - } - } +func createConsensusReactor(config *cfg.Config, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore sm.BlockStore, + mempool *mempl.CListMempool, + evidencePool *evidence.EvidencePool, + privValidator types.PrivValidator, + csMetrics *cs.Metrics, + fastSync bool, + eventBus *types.EventBus, + consensusLogger log.Logger) (*consensus.ConsensusReactor, *consensus.ConsensusState) { - // Make ConsensusReactor consensusState := cs.NewConsensusState( config.Consensus, state.Copy(), @@ -457,28 +433,13 @@ func NewNode(config *cfg.Config, } consensusReactor := cs.NewConsensusReactor(consensusState, fastSync || (config.StateSyncHeight >= 0) || config.HotSync, cs.ReactorMetrics(csMetrics)) 
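Stepping back to createBlockchainReactor above: the switch on config.FastSync.Version turns the sync strategy into a configuration choice rather than a build choice. Assuming the TOML section mirrors the Go field name (the mapping is not shown in this diff), opting into the v1 reactor would look like:

    [fastsync]
    version = "v1"  # "v0" keeps the original blockchain reactor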
 	consensusReactor.SetLogger(consensusLogger)
-
 	// services which will be publishing and/or subscribing for messages (events)
 	// consensusReactor will set it on consensusState and blockExecutor
 	consensusReactor.SetEventBus(eventBus)
+	return consensusReactor, consensusState
+}

-	p2pLogger := logger.With("module", "p2p")
-	nodeInfo, err := makeNodeInfo(
-		config,
-		nodeKey.ID(),
-		txIndexer,
-		genDoc.ChainID,
-		p2p.NewProtocolVersion(
-			version.P2PProtocol, // global
-			state.Version.Consensus.Block,
-			state.Version.Consensus.App,
-		),
-	)
-	if err != nil {
-		return nil, err
-	}
-
-	// Setup Transport.
+func createTransport(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns) (*p2p.MultiplexTransport, []p2p.PeerFilterFunc) {
 	var (
 		mConnConfig = p2p.MConnConfig(config.P2P)
 		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
@@ -504,7 +465,7 @@ func NewNode(config *cfg.Config,
 				return err
 			}
 			if res.IsErr() {
-				return fmt.Errorf("Error querying abci app: %v", res)
+				return fmt.Errorf("error querying abci app: %v", res)
 			}

 			return nil
@@ -522,7 +483,7 @@ func NewNode(config *cfg.Config,
 				return err
 			}
 			if res.IsErr() {
-				return fmt.Errorf("Error querying abci app: %v", res)
+				return fmt.Errorf("error querying abci app: %v", res)
 			}

 			return nil
@@ -531,8 +492,22 @@ func NewNode(config *cfg.Config,
 	}

 	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
+	return transport, peerFilters
+}
+
+func createSwitch(config *cfg.Config,
+	transport *p2p.MultiplexTransport,
+	p2pMetrics *p2p.Metrics,
+	peerFilters []p2p.PeerFilterFunc,
+	mempoolReactor *mempl.Reactor,
+	bcReactor p2p.Reactor,
+	stateReactor *snapshot.StateReactor,
+	consensusReactor *consensus.ConsensusReactor,
+	evidenceReactor *evidence.EvidenceReactor,
+	nodeInfo p2p.NodeInfo,
+	nodeKey *p2p.NodeKey,
+	p2pLogger log.Logger) *p2p.Switch {

-	// Setup Switch.
 	sw := p2p.NewSwitch(
 		config.P2P,
 		transport,
@@ -541,19 +516,229 @@ func NewNode(config *cfg.Config,
 	)
 	sw.SetLogger(p2pLogger)
 	sw.AddReactor("MEMPOOL", mempoolReactor)
-	if config.StateSyncReactor {
-		sw.AddReactor("STATE", stateReactor)
-	}
-	if config.HotSyncReactor {
-		sw.AddReactor("HOT", hotSyncReactor)
-	}
 	sw.AddReactor("BLOCKCHAIN", bcReactor)
 	sw.AddReactor("CONSENSUS", consensusReactor)
 	sw.AddReactor("EVIDENCE", evidenceReactor)
+	// stateReactor is special: it needs to be created before the blockchain reactor.
+	if stateReactor != nil {
+		sw.AddReactor("STATE", stateReactor)
+	}
+
 	sw.SetNodeInfo(nodeInfo)
 	sw.SetNodeKey(nodeKey)

 	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
+	return sw
+}
+
+func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
+	p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
+
+	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
+	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
+
+	// Add ourselves to addrbook to prevent dialing ourselves
+	if config.P2P.ExternalAddress != "" {
+		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
+		if err != nil {
+			return nil, errors.Wrap(err, "p2p.external_address is incorrect")
+		}
+		addrBook.AddOurAddress(addr)
+	}
+	if config.P2P.ListenAddress != "" {
+		addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
+		if err != nil {
+			return nil, errors.Wrap(err, "p2p.laddr is incorrect")
+		}
+		addrBook.AddOurAddress(addr)
+	}
+
+	sw.SetAddrBook(addrBook)
+
+	return addrBook, nil
+}
+
+func createHotSyncReactorAndAddToSwitch(privValidator types.PrivValidator, blockExec *sm.BlockExecutor, blockStore *store.BlockStore, eventBus *types.EventBus, state sm.State, config *cfg.Config, fastSync bool, hotMetrics *hot.Metrics, sw *p2p.Switch, logger log.Logger) {
+	hotSyncLogger := logger.With("module", "hotsync")
+	hotSyncReactor := hot.NewBlockChainReactor(state.Copy(), blockExec, blockStore, config.HotSync, fastSync || config.StateSyncHeight >= 0, config.HotSyncTimeout, hot.WithMetrics(hotMetrics), hot.WithEventBus(eventBus))
+	hotSyncReactor.SetLogger(hotSyncLogger)
+	if privValidator != nil {
+		hotSyncReactor.SetPrivValidator(privValidator)
+	}
+	sw.AddReactor("HOT", hotSyncReactor)
+}
+
+func createStateReactor(txDB dbm.DB, proxyApp proxy.AppConns, blockStore *store.BlockStore, stateDB dbm.DB, config *cfg.Config, logger log.Logger) *snapshot.StateReactor {
+	stateSyncLogger := logger.With("module", "statesync")
+	snapshot.InitSnapshotManager(stateDB, txDB, blockStore, config.DBDir(), stateSyncLogger)
+
+	// !!! This call may change config.StateSyncHeight !!!
+	// so reactors created later need to read config.StateSyncHeight rather than a copied variable
+	stateReactor := snapshot.NewStateReactor(stateDB, proxyApp.State(), config)
+	stateReactor.SetLogger(stateSyncLogger)
+	return stateReactor
+}
+
+func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
+	sw *p2p.Switch, logger log.Logger) *pex.PEXReactor {
+
+	// TODO persistent peers ? so we can have their DNS addrs saved
+	pexReactor := pex.NewPEXReactor(addrBook,
+		&pex.PEXReactorConfig{
+			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
+			SeedMode: config.P2P.SeedMode,
+			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
+			// blocks assuming 10s blocks ~ 28 hours.
+			// TODO (melekes): make it dynamic based on the actual block latencies
+			// from the live network.
+			// https://github.com/tendermint/tendermint/issues/3523
+			SeedDisconnectWaitPeriod: 28 * time.Hour,
+		})
+	pexReactor.SetLogger(logger.With("module", "pex"))
+	sw.AddReactor("PEX", pexReactor)
+	return pexReactor
+}
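NewNode (below) now takes variadic options that run after the node is assembled. A hedged sketch of a caller-side option, assuming Option is declared as func(*Node) elsewhere in node.go (the option(node) loop at the end of NewNode implies this) and reusing the CustomReactors option that TestNodeNewNodeCustomReactors exercises later in this diff:

```go
package example

import (
	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/p2p"
	p2pmock "github.com/tendermint/tendermint/p2p/mock"
)

// withMockBlockchain swaps the default BLOCKCHAIN reactor for a mock and
// registers an extra FOO reactor, mirroring TestNodeNewNodeCustomReactors.
// The resulting option is passed as the trailing argument to node.NewNode.
func withMockBlockchain() node.Option {
	return node.CustomReactors(map[string]p2p.Reactor{
		"FOO":        p2pmock.NewReactor(),
		"BLOCKCHAIN": p2pmock.NewReactor(),
	})
}
```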
+// NewNode returns a new, ready to go, Tendermint Node.
+func NewNode(config *cfg.Config,
+	privValidator types.PrivValidator,
+	nodeKey *p2p.NodeKey,
+	clientCreator proxy.ClientCreator,
+	genesisDocProvider GenesisDocProvider,
+	dbProvider DBProvider,
+	metricsProvider MetricsProvider,
+	logger log.Logger,
+	options ...Option) (*Node, error) {
+
+	blockStore, stateDB, err := initDBs(config, dbProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
+	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// EventBus and IndexerService must be started before the handshake because
+	// we might need to index the txs of the replayed block, as this might not have happened
+	// when the node stopped last time (i.e. the node stopped after it saved the block
+	// but before it indexed the txs, or the EndBlocker panicked)
+	eventBus, err := createAndStartEventBus(logger)
+	if err != nil {
+		return nil, err
+	}
+
+	csMetrics, p2pMetrics, memplMetrics, smMetrics, htMetrics := metricsProvider(genDoc.ChainID)
+
+	// Transaction indexing
+	txDB, txIndexerService, txIndexer, blockIndexerService, blockIndexer, indexHub, err := createAndStartIndexerService(config, dbProvider, eventBus, state.LastBlockHeight, stateDB, blockStore, smMetrics, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
+	// and replays any blocks as necessary to sync tendermint with the app.
+	consensusLogger := logger.With("module", "consensus")
+	if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, config.WithAppStat, consensusLogger); err != nil {
+		return nil, err
+	}
+
+	// Reload the state. It will have the Version.Consensus.App set by the
+	// Handshake, and may have other modifications as well (i.e. depending on
+	// what happened during block replay).
+	state = sm.LoadState(stateDB)
+
+	// If an address is provided, listen on the socket for a connection from an
+	// external signing process.
+	if config.PrivValidatorListenAddr != "" {
+		// FIXME: we should start services inside OnStart
+		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
+		if err != nil {
+			return nil, errors.Wrap(err, "error with private validator socket client")
+		}
+	}
+
+	pubKey := privValidator.GetPubKey()
+	if pubKey == nil {
+		// TODO: GetPubKey should return errors - https://github.com/tendermint/tendermint/issues/3602
+		return nil, errors.New("could not retrieve public key from private validator")
+	}
+
+	logNodeStartupInfo(state, pubKey, config, logger, consensusLogger)
+
+	// Decide whether to fast-sync or not.
+	// We don't fast-sync when the only validator is us.
+	fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, privValidator)
+
+	// Make MempoolReactor
+	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
+
+	// Make Evidence Reactor
+	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// make block executor for consensus and blockchain reactors to execute blocks
+	blockExec := sm.NewBlockExecutor(
+		stateDB,
+		logger.With("module", "state"),
+		proxyApp.Consensus(),
+		mempool,
+		evidencePool,
+		config.WithAppStat,
+		sm.BlockExecutorWithMetrics(smMetrics),
+	)
+
+	var stateReactor *snapshot.StateReactor
+	if config.StateSyncReactor {
+		stateReactor = createStateReactor(txDB, proxyApp, blockStore, stateDB, config, logger)
+	}
+
+	// Make BlockchainReactor
+	bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && (config.StateSyncHeight < 0 || !config.StateSyncReactor), logger)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not create blockchain reactor")
+	}
+
+	// Make ConsensusReactor
+	consensusReactor, consensusState := createConsensusReactor(
+		config, state, blockExec, blockStore, mempool, evidencePool,
+		privValidator, csMetrics, fastSync, eventBus, consensusLogger,
+	)
+
+	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup Transport.
+	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
+
+	// Setup Switch.
+	p2pLogger := logger.With("module", "p2p")
+	sw := createSwitch(
+		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, stateReactor,
+		consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
+	)
+
+	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
+	if err != nil {
+		return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
+	}
+
+	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not create addrbook")
+	}

 	// Optionally, start the pex reactor
 	//
@@ -567,36 +752,18 @@ func NewNode(config *cfg.Config,
 	//
 	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
 	// Note we currently use the addrBook regardless at least for AddOurAddress
-	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
-
-	// Add ourselves to addrbook to prevent dialing ourselves
-	addrBook.AddOurAddress(sw.NetAddress())
-
-	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
+	var pexReactor *pex.PEXReactor
 	if config.P2P.PexReactor {
-		// TODO persistent peers ? so we can have their DNS addrs saved
-		pexReactor := pex.NewPEXReactor(addrBook,
-			&pex.PEXReactorConfig{
-				Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
-				SeedMode: config.P2P.SeedMode,
-				// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
-				// blocks assuming 10s blocks ~ 28 hours.
-				// TODO (melekes): make it dynamic based on the actual block latencies
-				// from the live network.
- // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - }) - pexReactor.SetLogger(logger.With("module", "pex")) - sw.AddReactor("PEX", pexReactor) + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } - sw.SetAddrBook(addrBook) + if config.HotSyncReactor { + createHotSyncReactorAndAddToSwitch(privValidator, blockExec, blockStore, eventBus, state, config, fastSync, htMetrics, sw, logger) + } - // run the profile server - profileHost := config.ProfListenAddress - if profileHost != "" { + if config.ProfListenAddress != "" { go func() { - logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil)) + logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil)) }() } @@ -615,8 +782,10 @@ func NewNode(config *cfg.Config, blockStore: blockStore, bcReactor: bcReactor, mempoolReactor: mempoolReactor, + mempool: mempool, consensusState: consensusState, consensusReactor: consensusReactor, + pexReactor: pexReactor, evidencePool: evidencePool, proxyApp: proxyApp, txIndexer: txIndexer, @@ -627,6 +796,11 @@ func NewNode(config *cfg.Config, eventBus: eventBus, } node.BaseService = *cmn.NewBaseService(logger, "Node", node) + + for _, option := range options { + option(node) + } + return node, nil } @@ -658,7 +832,7 @@ func (n *Node) OnStart() error { } // Start the transport. - addr, err := p2p.NewNetAddressStringWithOptionalID(n.config.P2P.ListenAddress) + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) if err != nil { return err } @@ -668,6 +842,10 @@ func (n *Node) OnStart() error { n.isListening = true + if n.config.Mempool.WalEnabled() { + n.mempool.InitWAL() // no need to have the mempool wal during tests + } + // Start the switch (the P2P server). err = n.sw.Start() if err != nil { @@ -675,11 +853,9 @@ func (n *Node) OnStart() error { } // Always connect to persistent peers - if n.config.P2P.PersistentPeers != "" { - err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true) - if err != nil { - return err - } + err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return errors.Wrap(err, "could not dial peers from persistent_peers field") } return nil @@ -698,12 +874,11 @@ func (n *Node) OnStop() { n.indexHub.Stop() // now stop the reactors - // TODO: gracefully disconnect from peers. n.sw.Stop() // stop mempool WAL if n.config.Mempool.WalEnabled() { - n.mempoolReactor.Mempool.CloseWAL() + n.mempool.CloseWAL() } if err := n.transport.Close(); err != nil { @@ -738,7 +913,7 @@ func (n *Node) ConfigureRPC() { rpccore.SetStateDB(n.stateDB) rpccore.SetBlockStore(n.blockStore) rpccore.SetConsensusState(n.consensusState) - rpccore.SetMempool(n.mempoolReactor.Mempool) + rpccore.SetMempool(n.mempool) rpccore.SetEvidencePool(n.evidencePool) rpccore.SetP2PPeers(n.sw) rpccore.SetP2PTransport(n) @@ -766,6 +941,17 @@ func (n *Node) startRPC() ([]net.Listener, error) { rpccore.AddUnsafeRoutes() } + config := rpcserver.DefaultConfig() + config.MaxBodyBytes = n.config.RPC.MaxBodyBytes + config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. 
+ // See https://github.com/tendermint/tendermint/issues/3435 + if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + // we may expose the rpc over both a unix and tcp socket listeners := make([]net.Listener, len(listenAddrs)) var wsWorkerPool *gopool.Pool @@ -784,7 +970,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { if err != nil && err != tmpubsub.ErrSubscriptionNotFound { wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) } - }), rpcserver.SetWorkerPool(wsWorkerPool)) + }), rpcserver.SetWorkerPool(wsWorkerPool), rpcserver.ReadLimit(config.MaxBodyBytes)) wm.SetLogger(wmLogger) if n.config.RPC.DisableWebsocket { mux.HandleFunc("/websocket", wm.WebsocketDisabledHandler) @@ -792,16 +978,6 @@ func (n *Node) startRPC() ([]net.Listener, error) { mux.HandleFunc("/websocket", wm.WebsocketHandler) } rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger) - - config := rpcserver.DefaultConfig() - config.MaxOpenConnections = n.config.RPC.MaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - listener, err := rpcserver.Listen( listenAddr, config, @@ -883,7 +1059,7 @@ func (n *Node) Switch() *p2p.Switch { } // BlockStore returns the Node's BlockStore. -func (n *Node) BlockStore() *bc.BlockStore { +func (n *Node) BlockStore() *store.BlockStore { return n.blockStore } @@ -897,11 +1073,21 @@ func (n *Node) ConsensusReactor() *cs.ConsensusReactor { return n.consensusReactor } -// MempoolReactor returns the Node's MempoolReactor. -func (n *Node) MempoolReactor() *mempl.MempoolReactor { +// MempoolReactor returns the Node's mempool reactor. +func (n *Node) MempoolReactor() *mempl.Reactor { return n.mempoolReactor } +// Mempool returns the Node's mempool. +func (n *Node) Mempool() mempl.Mempool { + return n.mempool +} + +// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. +func (n *Node) PEXReactor() *pex.PEXReactor { + return n.pexReactor +} + // EvidencePool returns the Node's EvidencePool. 
 func (n *Node) EvidencePool() *evidence.EvidencePool {
 	return n.evidencePool
@@ -952,23 +1138,39 @@ func (n *Node) NodeInfo() p2p.NodeInfo {
 func makeNodeInfo(
 	config *cfg.Config,
-	nodeID p2p.ID,
+	nodeKey *p2p.NodeKey,
 	txIndexer txindex.TxIndexer,
-	chainID string,
-	protocolVersion p2p.ProtocolVersion,
+	genDoc *types.GenesisDoc,
+	state sm.State,
 ) (p2p.NodeInfo, error) {
 	txIndexerStatus := "on"
 	if _, ok := txIndexer.(*null.TxIndex); ok {
 		txIndexerStatus = "off"
 	}
+
+	var bcChannel byte
+	switch config.FastSync.Version {
+	case "v0":
+		bcChannel = bcv0.BlockchainChannel
+	case "v1":
+		// the channel ID actually stays the same as in v0
+		bcChannel = bcv1.BlockchainChannel
+	default:
+		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
+	}
+
 	nodeInfo := p2p.DefaultNodeInfo{
-		ProtocolVersion: protocolVersion,
-		ID_:             nodeID,
-		Network:         chainID,
-		Version:         version.TMCoreSemVer,
+		ProtocolVersion: p2p.NewProtocolVersion(
+			version.P2PProtocol, // global
+			state.Version.Consensus.Block,
+			state.Version.Consensus.App,
+		),
+		ID_:     nodeKey.ID(),
+		Network: genDoc.ChainID,
+		Version: version.TMCoreSemVer,
 		Channels: []byte{
 			snapshot.StateSyncChannel,
-			bc.BlockchainChannel,
+			bcChannel,
 			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
 			mempl.MempoolChannel,
 			evidence.EvidenceChannel,
@@ -1006,56 +1208,63 @@ var (
 	genesisDocKey = []byte("genesisDoc")
 )

+// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
+// database, or creates one using the given genesisDocProvider and persists the
+// result to the database. On success this also returns the genesis doc loaded
+// through the given provider.
+func LoadStateFromDBOrGenesisDocProvider(stateDB dbm.DB, genesisDocProvider GenesisDocProvider) (sm.State, *types.GenesisDoc, error) {
+	// Get genesis doc
+	genDoc, err := loadGenesisDoc(stateDB)
+	if err != nil {
+		genDoc, err = genesisDocProvider()
+		if err != nil {
+			return sm.State{}, nil, err
+		}
+		// save genesis doc to prevent a certain class of user errors (e.g. when it
+		// was changed, accidentally or not). Also good for audit trail.
+ saveGenesisDoc(stateDB, genDoc) + } + state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + if err != nil { + return sm.State{}, nil, err + } + return state, genDoc, nil +} + // panics if failed to unmarshal bytes func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { - bytes := db.Get(genesisDocKey) - if len(bytes) == 0 { + b := db.Get(genesisDocKey) + if len(b) == 0 { return nil, errors.New("Genesis doc not found") } var genDoc *types.GenesisDoc - err := cdc.UnmarshalJSON(bytes, &genDoc) + err := cdc.UnmarshalJSON(b, &genDoc) if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes)) + panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b)) } return genDoc, nil } // panics if failed to marshal the given genesis document func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { - bytes, err := cdc.MarshalJSON(genDoc) + b, err := cdc.MarshalJSON(genDoc) if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) + panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) } - db.SetSync(genesisDocKey, bytes) + db.SetSync(genesisDocKey, b) } func createAndStartPrivValidatorSocketClient( listenAddr string, logger log.Logger, ) (types.PrivValidator, error) { - var listener net.Listener - - protocol, address := cmn.ProtocolAndAddress(listenAddr) - ln, err := net.Listen(protocol, address) + pve, err := privval.NewSignerListener(listenAddr, logger) if err != nil { - return nil, err - } - switch protocol { - case "unix": - listener = privval.NewUnixListener(ln) - case "tcp": - // TODO: persist this key so external signer - // can actually authenticate us - listener = privval.NewTCPListener(ln, ed25519.GenPrivKey()) - default: - return nil, fmt.Errorf( - "Wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", - protocol, - ) + return nil, errors.Wrap(err, "failed to start private validator") } - pvsc := privval.NewSignerValidatorEndpoint(logger.With("module", "privval"), listener) - if err := pvsc.Start(); err != nil { + pvsc, err := privval.NewSignerClient(pve) + if err != nil { return nil, errors.Wrap(err, "failed to start private validator") } diff --git a/node/node_test.go b/node/node_test.go index f3823a38c..77e970d28 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -21,6 +21,7 @@ import ( "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" + p2pmock "github.com/tendermint/tendermint/p2p/mock" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -100,7 +101,10 @@ func TestNodeDelayedStart(t *testing.T) { n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) require.NoError(t, err) - n.Start() + err = n.Start() + require.NoError(t, err) + defer n.Stop() + startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) } @@ -132,25 +136,29 @@ func TestNodeSetPrivValTCP(t *testing.T) { config.BaseConfig.PrivValidatorListenAddr = addr dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) - pvsc := privval.NewSignerServiceEndpoint( + dialerEndpoint := privval.NewSignerDialerEndpoint( log.TestingLogger(), + dialer, + ) + privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) + + signerServer := privval.NewSignerServer( + 
dialerEndpoint, config.ChainID(), types.NewMockPV(), - dialer, ) - privval.SignerServiceEndpointTimeoutReadWrite(100 * time.Millisecond)(pvsc) go func() { - err := pvsc.Start() + err := signerServer.Start() if err != nil { panic(err) } }() - defer pvsc.Stop() + defer signerServer.Stop() n, err := DefaultNewNode(config, log.TestingLogger()) require.NoError(t, err) - assert.IsType(t, &privval.SignerValidatorEndpoint{}, n.PrivValidator()) + assert.IsType(t, &privval.SignerClient{}, n.PrivValidator()) } // address without a protocol must result in error @@ -174,13 +182,17 @@ func TestNodeSetPrivValIPC(t *testing.T) { config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile dialer := privval.DialUnixFn(tmpfile) - pvsc := privval.NewSignerServiceEndpoint( + dialerEndpoint := privval.NewSignerDialerEndpoint( log.TestingLogger(), + dialer, + ) + privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) + + pvsc := privval.NewSignerServer( + dialerEndpoint, config.ChainID(), types.NewMockPV(), - dialer, ) - privval.SignerServiceEndpointTimeoutReadWrite(100 * time.Millisecond)(pvsc) go func() { err := pvsc.Start() @@ -190,8 +202,7 @@ func TestNodeSetPrivValIPC(t *testing.T) { n, err := DefaultNewNode(config, log.TestingLogger()) require.NoError(t, err) - assert.IsType(t, &privval.SignerValidatorEndpoint{}, n.PrivValidator()) - + assert.IsType(t, &privval.SignerClient{}, n.PrivValidator()) } // testFreeAddr claims a free port so we don't block on listener being ready. @@ -224,7 +235,7 @@ func TestCreateProposalBlock(t *testing.T) { // Make Mempool memplMetrics := mempl.PrometheusMetrics("node_test") - mempool := mempl.NewMempool( + mempool := mempl.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, @@ -280,16 +291,49 @@ func TestCreateProposalBlock(t *testing.T) { assert.NoError(t, err) } +func TestNodeNewNodeCustomReactors(t *testing.T) { + config := cfg.ResetTestRoot("node_new_node_custom_reactors_test") + defer os.RemoveAll(config.RootDir) + + cr := p2pmock.NewReactor() + customBlockchainReactor := p2pmock.NewReactor() + + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + require.NoError(t, err) + + n, err := NewNode(config, + privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), + nodeKey, + proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), + DefaultGenesisDocProviderFunc(config), + DefaultDBProvider, + DefaultMetricsProvider(config.Instrumentation), + log.TestingLogger(), + CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKCHAIN": customBlockchainReactor}), + ) + require.NoError(t, err) + + err = n.Start() + require.NoError(t, err) + defer n.Stop() + + assert.True(t, cr.IsRunning()) + assert.Equal(t, cr, n.Switch().Reactor("FOO")) + + assert.True(t, customBlockchainReactor.IsRunning()) + assert.Equal(t, customBlockchainReactor, n.Switch().Reactor("BLOCKCHAIN")) +} + func state(nVals int, height int64) (sm.State, dbm.DB) { vals := make([]types.GenesisValidator, nVals) for i := 0; i < nVals; i++ { secret := []byte(fmt.Sprintf("test%d", i)) pk := ed25519.GenPrivKeyFromSecret(secret) vals[i] = types.GenesisValidator{ - pk.PubKey().Address(), - pk.PubKey(), - 1000, - fmt.Sprintf("test%d", i), + Address: pk.PubKey().Address(), + PubKey: pk.PubKey(), + Power: 1000, + Name: fmt.Sprintf("test%d", i), } } s, _ := sm.MakeGenesisState(&types.GenesisDoc{ diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index d2d52510b..3bccabd64 100644 --- a/p2p/base_reactor.go +++ 
b/p2p/base_reactor.go
@@ -5,26 +5,40 @@ import (
 	"github.com/tendermint/tendermint/p2p/conn"
 )

+// Reactor is responsible for handling incoming messages on one or more
+// Channel. Switch calls GetChannels when reactor is added to it. When a new
+// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
+// when the peer is stopped. Receive is called when a message is received on a
+// channel associated with this reactor.
+//
+// Peer#Send or Peer#TrySend should be used to send the message to a peer.
 type Reactor interface {
 	cmn.Service // Start, Stop

 	// SetSwitch allows setting a switch.
 	SetSwitch(*Switch)

-	// GetChannels returns the list of channel descriptors.
+	// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
+	// that each ID is unique across all the reactors added to the switch.
 	GetChannels() []*conn.ChannelDescriptor

-	// AddPeer is called by the switch when a new peer is added.
-	AddPeer(peer Peer)
+	// InitPeer is called by the switch before the peer is started. Use it to
+	// initialize data for the peer (e.g. peer state).
+	//
+	// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
+	// the peer. Do not store any data associated with the peer in the reactor
+	// itself unless you don't want to have a state, which is never cleaned up.
+	InitPeer(peer Peer) Peer

-	// InitAddPeer is called by switch before peer is started.
-	InitAddPeer(peer Peer) Peer
+	// AddPeer is called by the switch after the peer is added and successfully
+	// started. Use it to start goroutines communicating with the peer.
+	AddPeer(peer Peer)

 	// RemovePeer is called by the switch when the peer is stopped (due to error
 	// or other reason).
 	RemovePeer(peer Peer, reason interface{})

-	// Receive is called when msgBytes is received from peer.
+	// Receive is called by the switch when msgBytes is received from the peer.
 	//
 	// NOTE reactor can not keep msgBytes around after Receive completes without
 	// copying.
@@ -54,4 +68,4 @@ func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor        { return nil
 func (*BaseReactor) AddPeer(peer Peer)                             {}
 func (*BaseReactor) RemovePeer(peer Peer, reason interface{})      {}
 func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
-func (*BaseReactor) InitAddPeer(peer Peer) Peer { return peer }
+func (*BaseReactor) InitPeer(peer Peer) Peer { return peer }
diff --git a/p2p/wire.go b/p2p/codec.go
similarity index 100%
rename from p2p/wire.go
rename to p2p/codec.go
diff --git a/p2p/conn/wire.go b/p2p/conn/codec.go
similarity index 100%
rename from p2p/conn/wire.go
rename to p2p/conn/codec.go
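The reworked Reactor documentation above spells out the lifecycle: GetChannels at registration, InitPeer before the peer starts, AddPeer once it is started, Receive for messages, RemovePeer on stop. A minimal sketch of a reactor that follows that lifecycle; the channel ID and all names here are invented for illustration:

```go
package myreactor

import (
	"sync"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/conn"
)

// PingChannel is illustrative; channel IDs must be unique across reactors.
const PingChannel = byte(0x99)

// PingReactor allocates per-peer state in InitPeer (before the peer runs),
// starts talking in AddPeer (after it runs), and cleans up in RemovePeer.
type PingReactor struct {
	p2p.BaseReactor

	mtx  sync.Mutex
	seen map[p2p.ID]int // messages received per peer
}

func NewPingReactor() *PingReactor {
	r := &PingReactor{seen: make(map[p2p.ID]int)}
	r.BaseReactor = *p2p.NewBaseReactor("PingReactor", r)
	return r
}

func (r *PingReactor) GetChannels() []*conn.ChannelDescriptor {
	return []*conn.ChannelDescriptor{{ID: PingChannel, Priority: 1}}
}

func (r *PingReactor) InitPeer(peer p2p.Peer) p2p.Peer {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.seen[peer.ID()] = 0 // allocate state before the peer is started
	return peer
}

func (r *PingReactor) AddPeer(peer p2p.Peer) {
	peer.Send(PingChannel, []byte("ping")) // safe: the peer is started
}

func (r *PingReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.seen[peer.ID()]++ // copy msgBytes if it must outlive Receive
}

func (r *PingReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	delete(r.seen, peer.ID()) // drop state when the peer is stopped
}
```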
diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go
index 207e3f7fd..3eb9540d0 100644
--- a/p2p/conn/connection.go
+++ b/p2p/conn/connection.go
@@ -90,6 +90,9 @@ type MConnection struct {
 	quitSendRoutine chan struct{}
 	doneSendRoutine chan struct{}

+	// Closing quitRecvRoutine will cause the recvRoutine to eventually quit.
+	quitRecvRoutine chan struct{}
+
 	// used to ensure FlushStop and OnStop
 	// are safe to call concurrently.
 	stopMtx sync.Mutex
@@ -206,6 +209,7 @@ func (c *MConnection) OnStart() error {
 	c.chStatsTimer = time.NewTicker(updateStats)
 	c.quitSendRoutine = make(chan struct{})
 	c.doneSendRoutine = make(chan struct{})
+	c.quitRecvRoutine = make(chan struct{})
 	go c.sendRoutine()
 	go c.recvRoutine()
 	return nil
@@ -220,7 +224,14 @@ func (c *MConnection) stopServices() (alreadyStopped bool) {

 	select {
 	case <-c.quitSendRoutine:
-		// already quit via FlushStop or OnStop
+		// already quit
 		return true
 	default:
 	}

+	select {
+	case <-c.quitRecvRoutine:
+		// already quit
+		return true
+	default:
+	}
@@ -230,6 +241,8 @@ func (c *MConnection) stopServices() (alreadyStopped bool) {
 	c.pingTimer.Stop()
 	c.chStatsTimer.Stop()

+	// inform the recvRoutine that we are shutting down
+	close(c.quitRecvRoutine)
 	close(c.quitSendRoutine)
 	return false
 }
@@ -250,8 +263,6 @@ func (c *MConnection) FlushStop() {
 	<-c.doneSendRoutine

 	// Send and flush all pending msgs.
-	// By now, IsRunning == false,
-	// so any concurrent attempts to send will fail.
 	// Since sendRoutine has exited, we can call this
 	// safely
 	eof := c.sendSomePacketMsgs()
@@ -322,6 +333,8 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
 		return false
 	}

+	c.Logger.Debug("Send", "channel", chID, "conn", c, "msgLength", len(msgBytes))
+
 	// Send message to channel.
 	channel, ok := c.channelsIdx[chID]
 	if !ok {
@@ -337,7 +350,7 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
 		default:
 		}
 	} else {
-		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
+		c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgLength", len(msgBytes))
 	}
 	return success
 }
@@ -548,9 +561,22 @@ FOR_LOOP:
 		var err error
 		_n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize))
 		c.recvMonitor.Update(int(_n))
+
 		if err != nil {
+			// stopServices was invoked and we are shutting down;
+			// receiving is expected to fail since we will close the connection
+			select {
+			case <-c.quitRecvRoutine:
+				break FOR_LOOP
+			default:
+			}
+
 			if c.IsRunning() {
-				c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
+				if err == io.EOF {
+					c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c)
+				} else {
+					c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
+				}
 				c.stopForError(err)
 			}
 			break FOR_LOOP
@@ -592,6 +618,7 @@ FOR_LOOP:
 			break FOR_LOOP
 		}
 		if msgBytes != nil {
+			c.Logger.Debug("Received bytes", "chID", pkt.ChannelID)
 			// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
 			// except the mempool actually is using an asynchronous Receive() to prevent jamming requests
 			// stopping block producing (tested via )
@@ -710,7 +737,7 @@ type Channel struct {
 func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
 	desc = desc.FillDefaults()
 	if desc.Priority <= 0 {
-		cmn.PanicSanity("Channel default priority must be a positive integer")
+		panic("Channel default priority must be a positive integer")
 	}
 	return &Channel{
 		conn: conn,
diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go
index 283b00ebe..91e3e2099 100644
--- a/p2p/conn/connection_test.go
+++ b/p2p/conn/connection_test.go
@@ -57,7 +57,8 @@ func TestMConnectionSendFlushStop(t *testing.T) {
 		msgB := make([]byte, aminoMsgLength)
 		_, err := server.Read(msgB)
 		if err != nil {
-			t.Fatal(err)
+			t.Error(err)
+			return
 		}
 		errCh <- err
 	}()
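The quitRecvRoutine plumbing above is an instance of a common Go shutdown idiom: close a quit channel before tearing the connection down, then let the blocked reader consult that channel to tell an expected failure from a real one. A generic, runnable sketch of the idiom (deliberately not MConnection itself):

```go
package main

import "fmt"

// recvLoop mimics recvRoutine: a blocking read that starts failing once the
// connection is closed. Checking the quit channel first lets the loop tell an
// expected shutdown apart from a real error, which is what the
// quitRecvRoutine select above achieves.
func recvLoop(reads <-chan error, quit <-chan struct{}, done chan<- struct{}) {
	defer close(done)
	for err := range reads {
		if err != nil {
			select {
			case <-quit:
				return // shutting down; the failure is expected
			default:
			}
			fmt.Println("unexpected error:", err)
			return
		}
	}
}

func main() {
	reads := make(chan error)
	quit := make(chan struct{})
	done := make(chan struct{})
	go recvLoop(reads, quit, done)

	reads <- nil // a successful read
	close(quit)  // stopServices: announce shutdown first...
	reads <- fmt.Errorf("use of closed connection") // ...then the read fails
	<-done // the loop exited quietly
}
```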
diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go
index f8e784bbc..2481c04ca 100644
--- a/p2p/conn/secret_connection.go
+++ b/p2p/conn/secret_connection.go
@@ -8,6 +8,7 @@ import (
 	"crypto/subtle"
 	"encoding/binary"
 	"io"
+	"math"
 	"net"
 	"sync"
 	"time"
@@ -160,8 +161,10 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
 		if err := func() error {
 			var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize)
 			var frame = pool.Get(totalFrameSize)
-			defer pool.Put(sealedFrame)
-			defer pool.Put(frame)
+			defer func() {
+				pool.Put(sealedFrame)
+				pool.Put(frame)
+			}()
 			var chunk []byte
 			if dataMaxSize < len(data) {
 				chunk = data[:dataMaxSize]
@@ -456,6 +459,11 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature []
 // (little-endian in nonce[4:]).
 func incrNonce(nonce *[aeadNonceSize]byte) {
 	counter := binary.LittleEndian.Uint64(nonce[4:])
+	if counter == math.MaxUint64 {
+		// Terminates the session and makes sure the nonce is not re-used.
+		// See https://github.com/tendermint/tendermint/issues/3531
+		panic("can't increase nonce without overflow")
+	}
 	counter++
 	binary.LittleEndian.PutUint64(nonce[4:], counter)
 }
diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go
index 648e6529f..daa1e98a7 100644
--- a/p2p/conn/secret_connection_test.go
+++ b/p2p/conn/secret_connection_test.go
@@ -194,7 +194,8 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn net.Conn, txt string, n in
 	for i := 0; i < n; i++ {
 		_, err := conn.Write([]byte(txt))
 		if err != nil {
-			t.Fatalf("Failed to write to fooSecConn: %v", err)
+			t.Errorf("Failed to write to fooSecConn: %v", err)
+			return
 		}
 	}
 }
@@ -455,7 +456,8 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
 			if err == io.EOF {
 				return
 			} else if err != nil {
-				b.Fatalf("Failed to read from barSecConn: %v", err)
+				b.Errorf("Failed to read from barSecConn: %v", err)
+				return
 			}
 		}
 	}()
@@ -465,7 +467,8 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
 		idx := cmn.RandIntn(len(fooWriteBytes))
 		_, err := fooSecConn.Write(fooWriteBytes[idx])
 		if err != nil {
-			b.Fatalf("Failed to write to fooSecConn: %v", err)
+			b.Errorf("Failed to write to fooSecConn: %v", err)
+			return
 		}
 	}
 	b.StopTimer()
@@ -498,7 +501,8 @@ func BenchmarkReadSecretConnection(b *testing.B) {
 			idx := cmn.RandIntn(len(fooWriteBytes))
 			_, err := fooSecConn.Write(fooWriteBytes[idx])
 			if err != nil {
-				b.Fatalf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+				b.Errorf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+				return
 			}
 		}
 	}()
diff --git a/p2p/fuzz.go b/p2p/fuzz.go
index 80e4fed6a..135155d86 100644
--- a/p2p/fuzz.go
+++ b/p2p/fuzz.go
@@ -117,14 +117,15 @@ func (fc *FuzzedConnection) fuzz() bool {
 	case config.FuzzModeDrop:
 		// randomly drop the r/w, drop the conn, or sleep
 		r := cmn.RandFloat64()
-		if r <= fc.config.ProbDropRW {
+		switch {
+		case r <= fc.config.ProbDropRW:
 			return true
-		} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
+		case r < fc.config.ProbDropRW+fc.config.ProbDropConn:
 			// XXX: can't this fail because machine precision?
 			// XXX: do we need an error?
fc.Close() // nolint: errcheck, gas return true - } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep { + case r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep: time.Sleep(fc.randomDuration()) } case config.FuzzModeDelay: diff --git a/p2p/metrics.go b/p2p/metrics.go index 1ef481ad1..f0f7852c4 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -45,13 +45,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "peer_receive_bytes_total", Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID")).With(labelsAndValues...), PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "peer_send_bytes_total", Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID")).With(labelsAndValues...), PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go new file mode 100644 index 000000000..cfce12bd1 --- /dev/null +++ b/p2p/mock/reactor.go @@ -0,0 +1,23 @@ +package mock + +import ( + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/conn" +) + +type Reactor struct { + p2p.BaseReactor +} + +func NewReactor() *Reactor { + r := &Reactor{} + r.BaseReactor = *p2p.NewBaseReactor("Reactor", r) + r.SetLogger(log.TestingLogger()) + return r +} + +func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} } +func (r *Reactor) AddPeer(peer p2p.Peer) {} +func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} +func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} diff --git a/p2p/netaddress.go b/p2p/netaddress.go index 5534ded98..04a2b843f 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -13,9 +13,7 @@ import ( "strings" "time" - "errors" - - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" ) // NetAddress defines information about a peer on the network @@ -42,19 +40,24 @@ func IDAddressString(id ID, protocolHostPort string) string { // NewNetAddress returns a new NetAddress using the provided TCP // address. When testing, other net.Addr (except TCP) will result in // using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will -// panic. +// panic. Panics if ID is invalid. // TODO: socks proxies? func NewNetAddress(id ID, addr net.Addr) *NetAddress { tcpAddr, ok := addr.(*net.TCPAddr) if !ok { if flag.Lookup("test.v") == nil { // normal run - cmn.PanicSanity(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) + panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) } else { // in testing netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0) netAddr.ID = id return netAddr } } + + if err := validateID(id); err != nil { + panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr)) + } + ip := tcpAddr.IP port := uint16(tcpAddr.Port) na := NewNetAddressIPPort(ip, port) @@ -67,36 +70,20 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress { // Also resolves the host if host is not an IP. 
// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup) func NewNetAddressString(addr string) (*NetAddress, error) { - spl := strings.Split(addr, "@") - if len(spl) < 2 { - return nil, ErrNetAddressNoID{addr} - } - return NewNetAddressStringWithOptionalID(addr) -} - -// NewNetAddressStringWithOptionalID returns a new NetAddress using the -// provided address in the form of "ID@IP:Port", where the ID is optional. -// Also resolves the host if host is not an IP. -func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) { addrWithoutProtocol := removeProtocolIfDefined(addr) - - var id ID spl := strings.Split(addrWithoutProtocol, "@") - if len(spl) == 2 { - idStr := spl[0] - idBytes, err := hex.DecodeString(idStr) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - if len(idBytes) != IDByteLength { - return nil, ErrNetAddressInvalid{ - addrWithoutProtocol, - fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)} - } + if len(spl) != 2 { + return nil, ErrNetAddressNoID{addr} + } - id, addrWithoutProtocol = ID(idStr), spl[1] + // get ID + if err := validateID(ID(spl[0])); err != nil { + return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} } + var id ID + id, addrWithoutProtocol = ID(spl[0]), spl[1] + // get host and port host, portStr, err := net.SplitHostPort(addrWithoutProtocol) if err != nil { return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} @@ -218,22 +205,28 @@ func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) { // Routable returns true if the address is routable. func (na *NetAddress) Routable() bool { + if err := na.Valid(); err != nil { + return false + } // TODO(oga) bitcoind doesn't include RFC3849 here, but should we? - return na.Valid() && !(na.RFC1918() || na.RFC3927() || na.RFC4862() || + return !(na.RFC1918() || na.RFC3927() || na.RFC4862() || na.RFC4193() || na.RFC4843() || na.Local()) } // For IPv4 these are either a 0 or all bits set address. For IPv6 a zero // address or one that matches the RFC3849 documentation address format. -func (na *NetAddress) Valid() bool { - if string(na.ID) != "" { - data, err := hex.DecodeString(string(na.ID)) - if err != nil || len(data) != IDByteLength { - return false - } +func (na *NetAddress) Valid() error { + if err := validateID(na.ID); err != nil { + return errors.Wrap(err, "invalid ID") + } + + if na.IP == nil { + return errors.New("no IP") } - return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() || - na.IP.Equal(net.IPv4bcast)) + if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) { + return errors.New("invalid IP") + } + return nil } // HasID returns true if the address has an ID. @@ -257,36 +250,39 @@ func (na *NetAddress) ReachabilityTo(o *NetAddress) int { Ipv4 Ipv6_strong ) - if !na.Routable() { + switch { + case !na.Routable(): return Unreachable - } else if na.RFC4380() { - if !o.Routable() { + case na.RFC4380(): + switch { + case !o.Routable(): return Default - } else if o.RFC4380() { + case o.RFC4380(): return Teredo - } else if o.IP.To4() != nil { + case o.IP.To4() != nil: return Ipv4 - } else { // ipv6 + default: // ipv6 return Ipv6_weak } - } else if na.IP.To4() != nil { + case na.IP.To4() != nil: if o.Routable() && o.IP.To4() != nil { return Ipv4 } return Default - } else /* ipv6 */ { + default: /* ipv6 */ var tunnelled bool // Is our v6 is tunnelled? 
if o.RFC3964() || o.RFC6052() || o.RFC6145() { tunnelled = true } - if !o.Routable() { + switch { + case !o.Routable(): return Default - } else if o.RFC4380() { + case o.RFC4380(): return Teredo - } else if o.IP.To4() != nil { + case o.IP.To4() != nil: return Ipv4 - } else if tunnelled { + case tunnelled: // only prioritise ipv6 if we aren't tunnelling it. return Ipv6_weak } @@ -340,3 +336,17 @@ func removeProtocolIfDefined(addr string) string { return addr } + +func validateID(id ID) error { + if len(id) == 0 { + return errors.New("no ID") + } + idBytes, err := hex.DecodeString(string(id)) + if err != nil { + return err + } + if len(idBytes) != IDByteLength { + return fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength) + } + return nil +} diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index 1966167d7..e7d82cd77 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -11,42 +11,36 @@ import ( func TestNewNetAddress(t *testing.T) { tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") require.Nil(t, err) - addr := NewNetAddress("", tcpAddr) - assert.Equal(t, "127.0.0.1:8080", addr.String()) + assert.Panics(t, func() { + NewNetAddress("", tcpAddr) + }) + + addr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) + assert.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", addr.String()) assert.NotPanics(t, func() { NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000}) }, "Calling NewNetAddress with UDPAddr should not panic in testing") } -func TestIDAddressString(t *testing.T) { - testCases := []struct { - id ID - hostPort string - expect string - }{ - {"123xxx", "tcp://127.0.0.1:8080", "123xxx@127.0.0.1:8080"}, - {"123xxx", "udp://127.0.0.1:8080", "123xxx@127.0.0.1:8080"}, - {"123xxx", "127.0.0.1:8080", "123xxx@127.0.0.1:8080"}, - } - for _, tc := range testCases { - ac := IDAddressString(tc.id, tc.hostPort) - assert.Equal(t, tc.expect, ac) - } -} - -func TestNewNetAddressStringWithOptionalID(t *testing.T) { +func TestNewNetAddressString(t *testing.T) { testCases := []struct { name string addr string expected string correct bool }{ - {"no node id, no protocol", "127.0.0.1:8080", "127.0.0.1:8080", true}, - {"no node id, tcp input", "tcp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"no node id, udp input", "udp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"malformed udp input", "udp//127.0.0.1:8080", "", false}, + {"no node id and no protocol", "127.0.0.1:8080", "", false}, + {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, + {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, + + {"no protocol", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + {"tcp input", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + {"udp input", "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + // {"127.0.0:8080", false}, {"invalid host", "notahost", "", false}, {"invalid port", "127.0.0.1:notapath", "", false}, @@ -57,14 +51,13 @@ func TestNewNetAddressStringWithOptionalID(t *testing.T) { {"too short nodeId", 
"deadbeef@127.0.0.1:8080", "", false}, {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"correct nodeId", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, {"correct nodeId w/tcp", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - {"no node id when expected", "tcp://@127.0.0.1:8080", "", false}, + {"no node id", "tcp://@127.0.0.1:8080", "", false}, {"no node id or IP", "tcp://@", "", false}, {"tcp no host, w/ port", "tcp://:26656", "", false}, {"empty", "", "", false}, @@ -75,7 +68,7 @@ func TestNewNetAddressStringWithOptionalID(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) + addr, err := NewNetAddressString(tc.addr) if tc.correct { if assert.Nil(t, err, tc.addr) { assert.Equal(t, tc.expected, addr.String()) @@ -87,28 +80,6 @@ func TestNewNetAddressStringWithOptionalID(t *testing.T) { } } -func TestNewNetAddressString(t *testing.T) { - testCases := []struct { - addr string - expected string - correct bool - }{ - {"127.0.0.1:8080", "127.0.0.1:8080", false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - } -} - func TestNewNetAddressStrings(t *testing.T) { addrs, errs := NewNetAddressStrings([]string{ "127.0.0.1:8080", @@ -131,15 +102,20 @@ func TestNetAddressProperties(t *testing.T) { local bool routable bool }{ - {"127.0.0.1:8080", true, true, false}, - {"ya.ru:80", true, false, true}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true}, } for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) + addr, err := NewNetAddressString(tc.addr) require.Nil(t, err) - assert.Equal(t, tc.valid, addr.Valid()) + err = addr.Valid() + if tc.valid { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } assert.Equal(t, tc.local, addr.Local()) assert.Equal(t, tc.routable, addr.Routable()) } @@ -152,15 +128,15 @@ func TestNetAddressReachabilityTo(t *testing.T) { other string reachability int }{ - {"127.0.0.1:8080", "127.0.0.1:8081", 0}, - {"ya.ru:80", "127.0.0.1:8080", 1}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081", 0}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1}, } for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) + addr, err := NewNetAddressString(tc.addr) require.Nil(t, err) - other, err := NewNetAddressStringWithOptionalID(tc.other) + other, err := NewNetAddressString(tc.other) require.Nil(t, err) assert.Equal(t, tc.reachability, 
addr.ReachabilityTo(other)) diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go index 19567d2bf..e90f397cf 100644 --- a/p2p/node_info_test.go +++ b/p2p/node_info_test.go @@ -19,7 +19,7 @@ func TestNodeInfoValidate(t *testing.T) { channels[i] = byte(i) } dupChannels := make([]byte, 5) - copy(dupChannels[:], channels[:5]) + copy(dupChannels, channels[:5]) dupChannels = append(dupChannels, testCh) nonAscii := "¢§µ" @@ -31,7 +31,7 @@ func TestNodeInfoValidate(t *testing.T) { malleateNodeInfo func(*DefaultNodeInfo) expectErr bool }{ - {"Too Many Channels", func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, true}, + {"Too Many Channels", func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, true}, // nolint: gocritic {"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true}, {"Good Channels", func(ni *DefaultNodeInfo) { ni.Channels = ni.Channels[:5] }, false}, diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 42efc714f..8ba69a0bc 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -178,11 +178,11 @@ func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { return ok } -func (a *addrBook) AddPrivateIDs(IDs []string) { +func (a *addrBook) AddPrivateIDs(ids []string) { a.mtx.Lock() defer a.mtx.Unlock() - for _, id := range IDs { + for _, id := range ids { a.privateIDs[p2p.ID(id)] = struct{}{} } } @@ -586,8 +586,8 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookNilAddr{addr, src} } - if !addr.HasID() { - return ErrAddrBookInvalidAddrNoID{addr} + if err := addr.Valid(); err != nil { + return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} } if _, ok := a.privateIDs[addr.ID]; ok { @@ -607,10 +607,6 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookNonRoutable{addr} } - if !addr.Valid() { - return ErrAddrBookInvalidAddr{addr} - } - ka := a.addrLookup[addr.ID] if ka != nil { // If its already old and the addr is the same, ignore it. 
@@ -647,7 +643,7 @@ func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddre } total := 0 for _, bucket := range buckets { - total = total + len(bucket) + total += len(bucket) } addresses := make([]*knownAddress, 0, total) for _, bucket := range buckets { diff --git a/p2p/pex/wire.go b/p2p/pex/codec.go similarity index 100% rename from p2p/pex/wire.go rename to p2p/pex/codec.go diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 543056af5..911389a9e 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -56,17 +56,10 @@ func (err ErrAddrBookNilAddr) Error() string { } type ErrAddrBookInvalidAddr struct { - Addr *p2p.NetAddress + Addr *p2p.NetAddress + AddrErr error } func (err ErrAddrBookInvalidAddr) Error() string { - return fmt.Sprintf("Cannot add invalid address %v", err.Addr) -} - -type ErrAddrBookInvalidAddrNoID struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookInvalidAddrNoID) Error() string { - return fmt.Sprintf("Cannot add address with no ID %v", err.Addr) + return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) } diff --git a/p2p/pex/file.go b/p2p/pex/file.go index d4a516850..a42eddaf9 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -53,14 +53,14 @@ func (a *addrBook) loadFromFile(filePath string) bool { // Load addrBookJSON{} r, err := os.Open(filePath) if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Error opening file %s: %v", filePath, err)) + panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) } defer r.Close() // nolint: errcheck aJSON := &addrBookJSON{} dec := json.NewDecoder(r) err = dec.Decode(aJSON) if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Error reading file %s: %v", filePath, err)) + panic(fmt.Sprintf("Error reading file %s: %v", filePath, err)) } // Restore all the fields... diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 2c588f861..2769c3f9f 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -48,6 +48,24 @@ const ( biasToSelectNewPeers = 30 // 70 to select good peers ) +type errMaxAttemptsToDial struct { +} + +func (e errMaxAttemptsToDial) Error() string { + return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) +} + +type errTooEarlyToDial struct { + backoffDuration time.Duration + lastDialed time.Time +} + +func (e errTooEarlyToDial) Error() string { + return fmt.Sprintf( + "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)", + e.backoffDuration, e.lastDialed, time.Since(e.lastDialed)) +} + // PEXReactor handles PEX (peer exchange) and ensures that an // adequate number of peers are connected to the switch. // @@ -116,7 +134,7 @@ func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor { return r } -func (r *PEXReactor) InitAddPeer(peer Peer) Peer { +func (r *PEXReactor) InitPeer(peer Peer) Peer { id := string(peer.ID()) r.requestsSent.Delete(id) r.lastReceivedRequests.Delete(id) @@ -134,7 +152,7 @@ func (r *PEXReactor) OnStart() error { if err != nil { return err } else if numOnline == 0 && r.book.Empty() { - return errors.New("Address book is empty, and could not connect to any seed nodes") + return errors.New("Address book is empty and couldn't resolve any seed nodes") } r.seedAddrs = seedAddrs @@ -193,6 +211,13 @@ func (r *PEXReactor) AddPeer(p Peer) { } } +// RemovePeer implements Reactor by resetting peer's requests info. 
+func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) { + id := string(p.ID()) + r.requestsSent.Delete(id) + r.lastReceivedRequests.Delete(id) +} + func (r *PEXReactor) logErrAddrBook(err error) { if err != nil { switch err.(type) { @@ -205,13 +230,6 @@ func (r *PEXReactor) logErrAddrBook(err error) { } } -// RemovePeer implements Reactor. -func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) { - id := string(p.ID()) - r.requestsSent.Delete(id) - r.lastReceivedRequests.Delete(id) -} - // Receive implements Reactor by handling incoming PEX messages. func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { msg, err := decodeMsg(msgBytes) @@ -232,7 +250,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { // If we're a seed and this is an inbound peer, // respond once and disconnect. - if r.config.SeedMode && !r.Switch.IsPersistent(src) { + if r.config.SeedMode && !src.IsPersistent() { id := string(src.ID()) v := r.lastReceivedRequests.Get(id) if v != nil { @@ -294,7 +312,7 @@ func (r *PEXReactor) receiveRequest(src Peer) error { now := time.Now() minInterval := r.minReceiveRequestInterval() if now.Sub(lastReceived) < minInterval { - return fmt.Errorf("Peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", + return fmt.Errorf("peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", src.ID(), lastReceived, now, @@ -305,14 +323,14 @@ func (r *PEXReactor) receiveRequest(src Peer) error { return nil } -// RequestAddrs asks peer for more addresses if we do not already -// have a request out for this peer. +// RequestAddrs asks peer for more addresses if we do not already have a +// request out for this peer. func (r *PEXReactor) RequestAddrs(p Peer) { - r.Logger.Debug("Request addrs", "from", p) id := string(p.ID()) if r.requestsSent.Has(id) { return } + r.Logger.Debug("Request addrs", "from", p) r.requestsSent.Set(id, struct{}{}) p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{})) } @@ -323,7 +341,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) { func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { id := string(src.ID()) if !r.requestsSent.Has(id) { - return errors.New("Unsolicited pexAddrsMessage") + return errors.New("unsolicited pexAddrsMessage") } r.requestsSent.Delete(id) @@ -331,12 +349,21 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { if err != nil { return err } + + srcIsSeed := false + for _, seedAddr := range r.seedAddrs { + if seedAddr.Equals(srcAddr) { + srcIsSeed = true + break + } + } + for _, netAddr := range addrs { // Validate netAddr. Disconnect from a peer if it sends us invalid data. if netAddr == nil { return errors.New("nil address in pexAddrsMessage") } - // TODO: extract validating logic from NewNetAddressStringWithOptionalID + // TODO: extract validating logic from NewNetAddressString // and put it in netAddr#Valid (#2722) na, err := p2p.NewNetAddressString(netAddr.String()) if err != nil { @@ -356,13 +383,23 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { } // If this address came from a seed node, try to connect to it without - // waiting. 
- for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - r.ensurePeers() - } + // waiting (#2093) + if srcIsSeed { + r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr) + go func(addr *p2p.NetAddress) { + err := r.dialPeer(addr) + if err != nil { + switch err.(type) { + case errMaxAttemptsToDial, errTooEarlyToDial: + r.Logger.Debug(err.Error(), "addr", addr) + default: + r.Logger.Error(err.Error(), "addr", addr) + } + } + }(netAddr) } } + return nil } @@ -458,11 +495,21 @@ func (r *PEXReactor) ensurePeers() { // Dial picked addresses for _, addr := range toDial { - go r.dialPeer(addr) + go func(addr *p2p.NetAddress) { + err := r.dialPeer(addr) + if err != nil { + switch err.(type) { + case errMaxAttemptsToDial, errTooEarlyToDial: + r.Logger.Debug(err.Error(), "addr", addr) + default: + r.Logger.Error(err.Error(), "addr", addr) + } + } + }(addr) } - // If we need more addresses, pick a random peer and ask for more. if r.book.NeedMoreAddrs() { + // 1) Pick a random peer and ask for more. peers := r.Switch.Peers().List() peersCount := len(peers) if peersCount > 0 { @@ -470,12 +517,14 @@ func (r *PEXReactor) ensurePeers() { r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) r.RequestAddrs(peer) } - } - // If we are not connected to nor dialing anybody, fallback to dialing a seed. - if out+in+dial+len(toDial) == 0 { - r.Logger.Info("No addresses to dial nor connected peers. Falling back to seeds") - r.dialSeeds() + // 2) Dial seeds if we are not dialing anyone. + // This is done in addition to asking a peer for addresses to work-around + // peers not participating in PEX. + if len(toDial) == 0 { + r.Logger.Info("No addresses to dial. Falling back to seeds") + r.dialSeeds() + } } } @@ -488,17 +537,16 @@ func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastD return atd.number, atd.lastDialed } -func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) { +func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) error { attempts, lastDialed := r.dialAttemptsInfo(addr) if attempts > maxAttemptsToDial { - // Do not log the message if the addr gets readded. - if attempts+1 == maxAttemptsToDial { - r.Logger.Info("Reached max attempts to dial", "addr", addr, "attempts", attempts) - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } + // TODO(melekes): have a blacklist in the addrbook with peers whom we've + // failed to connect to. Then we can clean up attemptsToDial, which acts as + // a blacklist currently. 
+ // https://github.com/tendermint/tendermint/issues/3572 r.book.MarkBad(addr) - return + return errMaxAttemptsToDial{} } // exponential backoff if it's not our first attempt to dial given address @@ -507,33 +555,30 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) { backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second) sinceLastDialed := time.Since(lastDialed) if sinceLastDialed < backoffDuration { - r.Logger.Debug("Too early to dial", "addr", addr, "backoff_duration", backoffDuration, "last_dialed", lastDialed, "time_since", sinceLastDialed) - return + return errTooEarlyToDial{backoffDuration, lastDialed} } } - err := r.Switch.DialPeerWithAddress(addr, false) + err := r.Switch.DialPeerWithAddress(addr) if err != nil { if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return + return err } - r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts) markAddrInBookBasedOnErr(addr, r.book, err) - if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok { + switch err.(type) { + case p2p.ErrSwitchAuthenticationFailure: + // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr r.attemptsToDial.Delete(addr.DialString()) - } else { - // FIXME: if the addr is going to be removed from the addrbook (hard to - // tell at this point), we need to Delete it from attemptsToDial, not - // record another attempt. - // record attempt + default: r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) } - return + return errors.Wrapf(err, "dialing failed (attempts: %d)", attempts+1) } // cleanup any history r.attemptsToDial.Delete(addr.DialString()) + return nil } // checkSeeds checks that addresses are well formed. @@ -556,7 +601,7 @@ func (r *PEXReactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, er return 0, nil, errors.Wrap(e, "seed node configuration has error") } } - return + return numOnline, netAddrs, nil } // randomly dial seeds until we connect to one or exhaust them @@ -566,13 +611,16 @@ func (r *PEXReactor) dialSeeds() { for _, i := range perm { // dial a random seed seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr, false) + err := r.Switch.DialPeerWithAddress(seedAddr) if err == nil { return } r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) } - r.Switch.Logger.Error("Couldn't connect to any seeds") + // do not write error message if there were no seeds specified in config + if len(r.seedAddrs) > 0 { + r.Switch.Logger.Error("Couldn't connect to any seeds") + } } // AttemptsToDial returns the number of attempts to dial specific address. It @@ -591,8 +639,13 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int { // Seed/Crawler Mode causes this node to quickly disconnect // from peers, except other seed nodes. 
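
Before the crawler routine below, one note on the backoff used in dialPeer above: the wait before re-dialing an address grows as 2^attempts seconds plus up to a second of random jitter, so a flaky address is retried quickly at first and then backed off hard. A minimal standalone sketch of that gate (tooEarlyToDial and its inputs are illustrative, not part of this change):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// tooEarlyToDial mirrors the gate in dialPeer: a redial is allowed only
// once jitter + 2^attempts seconds have elapsed since the last attempt.
func tooEarlyToDial(lastDialed time.Time, attempts int) bool {
	jitter := time.Duration(rand.Float64() * float64(time.Second)) // [0s, 1s)
	backoff := jitter + (1<<uint(attempts))*time.Second
	return time.Since(lastDialed) < backoff
}

func main() {
	lastDialed := time.Now().Add(-5 * time.Second)
	for attempts := 0; attempts < 5; attempts++ {
		// With 5s elapsed, attempts 0-2 may still dial; 3+ are backed off.
		fmt.Printf("attempts=%d tooEarly=%v\n", attempts, tooEarlyToDial(lastDialed, attempts))
	}
}

Note that dialPeer now reports both outcomes as typed errors (errTooEarlyToDial, errMaxAttemptsToDial), which is what lets ensurePeers and crawlPeers demote them to debug-level logs while still reporting real dial failures at error level.
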
func (r *PEXReactor) crawlPeersRoutine() { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) + // If we have any seed nodes, consult them first + if len(r.seedAddrs) > 0 { + r.dialSeeds() + } else { + // Do an initial crawl + r.crawlPeers(r.book.GetSelection()) + } // Fire periodically ticker := time.NewTicker(crawlPeerPeriod) @@ -642,14 +695,14 @@ func (r *PEXReactor) crawlPeers(addrs []*p2p.NetAddress) { LastCrawled: now, } - err := r.Switch.DialPeerWithAddress(addr, false) + err := r.dialPeer(addr) if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - continue + switch err.(type) { + case errMaxAttemptsToDial, errTooEarlyToDial: + r.Logger.Debug(err.Error(), "addr", addr) + default: + r.Logger.Error(err.Error(), "addr", addr) } - - r.Logger.Error("Dialing failed", "addr", addr, "err", err) - markAddrInBookBasedOnErr(addr, r.book, err) continue } @@ -680,7 +733,7 @@ func (r *PEXReactor) attemptDisconnects() { if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { continue } - if r.Switch.IsPersistent(peer) { + if peer.IsPersistent() { continue } r.Switch.StopPeerGracefully(peer) diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 342baa78a..4eca605d7 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -144,7 +144,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { sw.SetAddrBook(book) peer := mock.NewPeer(nil) - p2p.AddPeerToSwitch(sw, peer) + p2p.AddPeerToSwitchPeerSet(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) id := string(peer.ID()) @@ -175,7 +175,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { sw.SetAddrBook(book) peer := mock.NewPeer(nil) - p2p.AddPeerToSwitch(sw, peer) + p2p.AddPeerToSwitchPeerSet(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) id := string(peer.ID()) @@ -292,7 +292,8 @@ func TestPEXReactorSeedMode(t *testing.T) { require.Nil(t, err) defer os.RemoveAll(dir) // nolint: errcheck - pexR, book := createReactor(&PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}) + pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} + pexR, book := createReactor(pexRConfig) defer teardownReactor(book) sw := createSwitchAndAddReactors(pexR) @@ -316,13 +317,80 @@ func TestPEXReactorSeedMode(t *testing.T) { pexR.attemptDisconnects() assert.Equal(t, 1, sw.Peers().Size()) - time.Sleep(100 * time.Millisecond) + // sleep for SeedDisconnectWaitPeriod + time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) // 3. attemptDisconnects should disconnect after wait period pexR.attemptDisconnects() assert.Equal(t, 0, sw.Peers().Size()) } +func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} + pexR, book := createReactor(pexRConfig) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) + err = sw.Start() + require.NoError(t, err) + defer sw.Stop() + + assert.Zero(t, sw.Peers().Size()) + + peerSwitch := testCreateDefaultPeer(dir, 1) + require.NoError(t, peerSwitch.Start()) + defer peerSwitch.Stop() + + err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}) + require.NoError(t, err) + + // 1. 
Test crawlPeers dials the peer + pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) + assert.Equal(t, 1, sw.Peers().Size()) + assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) + + // sleep for SeedDisconnectWaitPeriod + time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) + + // 2. attemptDisconnects should not disconnect because the peer is persistent + pexR.attemptDisconnects() + assert.Equal(t, 1, sw.Peers().Size()) +} + +func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + pexR, book := createReactor(&PEXReactorConfig{SeedMode: true}) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) + err = sw.Start() + require.NoError(t, err) + defer sw.Stop() + + peer := mock.NewPeer(nil) + addr := peer.SocketAddr() + + err = book.AddAddress(addr, addr) + require.NoError(t, err) + + assert.True(t, book.HasAddress(addr)) + // imitate maxAttemptsToDial reached + pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) + pexR.crawlPeers([]*p2p.NetAddress{addr}) + assert.False(t, book.HasAddress(addr)) +} + // connect a peer to a seed, wait a bit, then stop it. // this should give it time to request addrs and for the seed // to call FlushStop, and allows us to test calling Stop concurrently @@ -371,7 +439,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { reactor := switches[0].Reactors()["pex"].(*PEXReactor) peerID := switches[1].NodeInfo().ID() - err = switches[1].DialPeerWithAddress(switches[0].NetAddress(), false) + err = switches[1].DialPeerWithAddress(switches[0].NetAddress()) assert.NoError(t, err) // sleep up to a second while waiting for the peer to send us a message. diff --git a/p2p/switch.go b/p2p/switch.go index a849fc90b..274774003 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -69,17 +69,16 @@ type PeerFilterFunc func(IPeerSet, Peer) error type Switch struct { cmn.BaseService - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmn.CMap - reconnecting *cmn.CMap - nodeInfo NodeInfo // our node info - nodeKey *NodeKey // our node privkey - addrBook AddrBook - persistentPeers map[ID]bool + config *config.P2PConfig + reactors map[string]Reactor + chDescs []*conn.ChannelDescriptor + reactorsByCh map[byte]Reactor + peers *PeerSet + dialing *cmn.CMap + reconnecting *cmn.CMap + nodeInfo NodeInfo // our node info + nodeKey *NodeKey // our node privkey + addrBook AddrBook transport Transport @@ -89,6 +88,8 @@ type Switch struct { rng *cmn.Rand // seed for randomizing dial times and orders metrics *Metrics + + persistentPeersAddrs []*NetAddress } // NetAddress returns the address the switch is listening on. 
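
The struct change above replaces the ID-keyed persistentPeers map with persistentPeersAddrs, a list of net addresses; a later hunk derives an address predicate from it (isPeerPersistentFn). A rough standalone sketch of that closure pattern, with netAddr standing in for p2p.NetAddress (whose real Equals compares ID, IP and port):

package main

import "fmt"

// netAddr is a simplified stand-in for p2p.NetAddress.
type netAddr struct{ id, hostPort string }

func (a netAddr) Equals(b netAddr) bool { return a == b }

// isPersistentFn mirrors Switch.isPeerPersistentFn: it closes over the
// configured persistent addresses and answers for any candidate address.
func isPersistentFn(persistent []netAddr) func(netAddr) bool {
	return func(na netAddr) bool {
		for _, pa := range persistent {
			if pa.Equals(na) {
				return true
			}
		}
		return false
	}
}

func main() {
	isPersistent := isPersistentFn([]netAddr{{"deadbeef", "1.2.3.4:26656"}})
	fmt.Println(isPersistent(netAddr{"deadbeef", "1.2.3.4:26656"})) // true
	fmt.Println(isPersistent(netAddr{"cafebabe", "5.6.7.8:26656"})) // false
}

Deciding persistence by address rather than by a per-connection flag is what allows StopPeerForError (below) to reconnect to a persistent peer even when the lost connection was inbound.
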
@@ -107,21 +108,21 @@ func NewSwitch( options ...SwitchOption, ) *Switch { sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmn.NewCMap(), - reconnecting: cmn.NewCMap(), - metrics: NopMetrics(), - transport: transport, - filterTimeout: defaultFilterTimeout, + config: cfg, + reactors: make(map[string]Reactor), + chDescs: make([]*conn.ChannelDescriptor, 0), + reactorsByCh: make(map[byte]Reactor), + peers: NewPeerSet(), + dialing: cmn.NewCMap(), + reconnecting: cmn.NewCMap(), + metrics: NopMetrics(), + transport: transport, + filterTimeout: defaultFilterTimeout, + persistentPeersAddrs: make([]*NetAddress, 0), } // Ensure we have a completely undeterministic PRNG. sw.rng = cmn.NewRand() - sw.initPersistentPeers() sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw) for _, option := range options { @@ -152,13 +153,11 @@ func WithMetrics(metrics *Metrics) SwitchOption { // AddReactor adds the given reactor to the switch. // NOTE: Not goroutine safe. func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - // Validate the reactor. - // No two reactors can share the same channel. - reactorChannels := reactor.GetChannels() - for _, chDesc := range reactorChannels { + for _, chDesc := range reactor.GetChannels() { chID := chDesc.ID + // No two reactors can share the same channel. if sw.reactorsByCh[chID] != nil { - cmn.PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) + panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) } sw.chDescs = append(sw.chDescs, chDesc) sw.reactorsByCh[chID] = reactor @@ -168,6 +167,23 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { return reactor } +// RemoveReactor removes the given Reactor from the Switch. +// NOTE: Not goroutine safe. +func (sw *Switch) RemoveReactor(name string, reactor Reactor) { + for _, chDesc := range reactor.GetChannels() { + // remove channel description + for i := 0; i < len(sw.chDescs); i++ { + if chDesc.ID == sw.chDescs[i].ID { + sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...) + break + } + } + delete(sw.reactorsByCh, chDesc.ID) + } + delete(sw.reactors, name) + reactor.SetSwitch(nil) +} + // Reactors returns a map of reactors registered on the switch. // NOTE: Not goroutine safe. 
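
RemoveReactor above undoes AddReactor's bookkeeping: each channel descriptor owned by the reactor is dropped from the shared slice, its channel-to-reactor mapping is deleted, and the reactor is detached via SetSwitch(nil). A toy sketch of the same slice-and-map cleanup, using plain stand-in types rather than the p2p ones:

package main

import "fmt"

type chDesc struct{ ID byte }

// removeChannels deletes every descriptor owned by a reactor from the
// switch-wide slice and map, the way Switch.RemoveReactor does.
func removeChannels(chDescs []chDesc, byCh map[byte]string, owned []chDesc) []chDesc {
	for _, d := range owned {
		for i := 0; i < len(chDescs); i++ {
			if chDescs[i].ID == d.ID {
				chDescs = append(chDescs[:i], chDescs[i+1:]...)
				break
			}
		}
		delete(byCh, d.ID)
	}
	return chDescs
}

func main() {
	chDescs := []chDesc{{0x10}, {0x20}, {0x30}}
	byCh := map[byte]string{0x10: "pex", 0x20: "mempool", 0x30: "evidence"}
	chDescs = removeChannels(chDescs, byCh, []chDesc{{0x20}})
	fmt.Println(chDescs, byCh) // [{16} {48}] map[16:pex 48:evidence]
}
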
func (sw *Switch) Reactors() map[string]Reactor { @@ -198,21 +214,6 @@ func (sw *Switch) SetNodeKey(nodeKey *NodeKey) { sw.nodeKey = nodeKey } -func (sw *Switch) initPersistentPeers() { - sw.persistentPeers = make(map[ID]bool, 0) - if sw.config.PersistentPeers != "" { - peers := cmn.SplitAndTrim(sw.config.PersistentPeers, ",", " ") - netAddrs, _ := NewNetAddressStrings(peers) - for _, addr := range netAddrs { - sw.persistentPeers[addr.ID] = true - } - } -} - -func (sw *Switch) IsPersistent(peer Peer) bool { - return sw.persistentPeers[peer.ID()] -} - //--------------------------------------------------------------------- // Service start/stop @@ -222,7 +223,7 @@ func (sw *Switch) OnStart() error { for _, reactor := range sw.reactors { err := reactor.Start() if err != nil { - return cmn.ErrorWrap(err, "failed to start %v", reactor) + return errors.Wrapf(err, "failed to start %v", reactor) } } @@ -236,11 +237,7 @@ func (sw *Switch) OnStart() error { func (sw *Switch) OnStop() { // Stop peers for _, p := range sw.peers.List() { - sw.transport.Cleanup(p) - p.Stop() - if sw.peers.Remove(p) { - sw.metrics.Peers.Add(float64(-1)) - } + sw.stopAndRemovePeer(p, nil) } // Stop reactors @@ -314,8 +311,20 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) sw.stopAndRemovePeer(peer, reason) - if sw.IsPersistent(peer) { - go sw.reconnectToPeer(peer.SocketAddr()) + if peer.IsPersistent() { + var addr *NetAddress + if peer.IsOutbound() { // socket address for outbound peers + addr = peer.SocketAddr() + } else { // self-reported address for inbound peers + var err error + addr, err = peer.NodeInfo().NetAddress() + if err != nil { + sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", + "peer", peer, "err", err) + return + } + } + go sw.reconnectToPeer(addr) } } @@ -327,14 +336,20 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { } func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } sw.transport.Cleanup(peer) peer.Stop() + for _, reactor := range sw.reactors { reactor.RemovePeer(peer, reason) } + + // Removing a peer should go last to avoid a situation where a peer + // reconnect to our node and the switch calls InitPeer before + // RemovePeer is finished. + // https://github.com/tendermint/tendermint/issues/3338 + if sw.peers.Remove(peer) { + sw.metrics.Peers.Add(float64(-1)) + } } // reconnectToPeer tries to reconnect to the addr, first repeatedly @@ -359,7 +374,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { return } - err := sw.DialPeerWithAddress(addr, true) + err := sw.DialPeerWithAddress(addr) if err == nil { return // success } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { @@ -383,7 +398,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - err := sw.DialPeerWithAddress(addr, true) + err := sw.DialPeerWithAddress(addr) if err == nil { return // success } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { @@ -419,28 +434,41 @@ func isPrivateAddr(err error) bool { return ok && te.PrivateAddr() } -// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent). +// DialPeersAsync dials a list of peers asynchronously in random order. 
// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// TODO: remove addrBook arg since it's now set on the switch -func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error { +// It ignores ErrNetAddressLookup. However, if there are other errors, first +// encounter is returned. +// Nop if there are no peers. +func (sw *Switch) DialPeersAsync(peers []string) error { netAddrs, errs := NewNetAddressStrings(peers) - // only log errors, dial correct addresses + // report all the errors for _, err := range errs { sw.Logger.Error("Error in peer's address", "err", err) } + // return first non-ErrNetAddressLookup error + for _, err := range errs { + if _, ok := err.(ErrNetAddressLookup); ok { + continue + } + return err + } + sw.dialPeersAsync(netAddrs) + return nil +} +func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { ourAddr := sw.NetAddress() // TODO: this code feels like it's in the wrong place. // The integration tests depend on the addrBook being saved // right away but maybe we can change that. Recall that // the addrBook is only written to disk every 2min - if addrBook != nil { + if sw.addrBook != nil { // add peers to `addrBook` for _, netAddr := range netAddrs { // do not add our address or ID if !netAddr.Same(ourAddr) { - if err := addrBook.AddAddress(netAddr, ourAddr); err != nil { + if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { if isPrivateAddr(err) { sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) } else { @@ -451,7 +479,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b } // Persist some peers to disk right away. // NOTE: integration tests depend on this - addrBook.Save() + sw.addrBook.Save() } // permute the list, dial them in random order. @@ -468,7 +496,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b sw.randomSleep(0) - err := sw.DialPeerWithAddress(addr, persistent) + err := sw.DialPeerWithAddress(addr) if err != nil { switch err.(type) { case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: @@ -479,16 +507,13 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b } }(i) } - return nil } // DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects // and authenticates successfully. -// If `persistent == true`, the switch will always try to reconnect to this -// peer if the connection ever fails. // If we're currently dialing this address or it belongs to an existing peer, // ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { +func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { if sw.IsDialingOrExistingAddress(addr) { return ErrCurrentlyDialingOrExistingAddress{addr.String()} } @@ -496,7 +521,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { sw.dialing.Set(string(addr.ID), addr) defer sw.dialing.Delete(string(addr.ID)) - return sw.addOutboundPeerWithConfig(addr, sw.config, persistent) + return sw.addOutboundPeerWithConfig(addr, sw.config) } // sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] @@ -513,6 +538,38 @@ func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) } +// AddPersistentPeers allows you to set persistent peers. It ignores +// ErrNetAddressLookup. 
However, if there are other errors, first encounter is +// returned. +func (sw *Switch) AddPersistentPeers(addrs []string) error { + sw.Logger.Info("Adding persistent peers", "addrs", addrs) + netAddrs, errs := NewNetAddressStrings(addrs) + // report all the errors + for _, err := range errs { + sw.Logger.Error("Error in peer's address", "err", err) + } + // return first non-ErrNetAddressLookup error + for _, err := range errs { + if _, ok := err.(ErrNetAddressLookup); ok { + continue + } + return err + } + sw.persistentPeersAddrs = netAddrs + return nil +} + +func (sw *Switch) isPeerPersistentFn() func(*NetAddress) bool { + return func(na *NetAddress) bool { + for _, pa := range sw.persistentPeersAddrs { + if pa.Equals(na) { + return true + } + } + return false + } +} + func (sw *Switch) acceptRoutine() { for { p, err := sw.transport.Accept(peerConfig{ @@ -520,6 +577,7 @@ func (sw *Switch) acceptRoutine() { onPeerError: sw.StopPeerForError, reactorsByCh: sw.reactorsByCh, metrics: sw.metrics, + isPersistent: sw.isPeerPersistentFn(), }) if err != nil { switch err := err.(type) { @@ -599,13 +657,12 @@ func (sw *Switch) acceptRoutine() { // dial the peer; make secret connection; authenticate against the dialed ID; // add the peer. -// if dialing fails, start the reconnect loop. If handhsake fails, its over. -// If peer is started succesffuly, reconnectLoop will start when -// StopPeerForError is called +// if dialing fails, start the reconnect loop. If handshake fails, it's over. +// If peer is started successfully, reconnectLoop will start when +// StopPeerForError is called. func (sw *Switch) addOutboundPeerWithConfig( addr *NetAddress, cfg *config.P2PConfig, - persistent bool, ) error { sw.Logger.Info("Dialing peer", "address", addr) @@ -618,13 +675,12 @@ func (sw *Switch) addOutboundPeerWithConfig( p, err := sw.transport.Dial(*addr, peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, - persistent: persistent, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, metrics: sw.metrics, }) if err != nil { - switch e := err.(type) { - case ErrRejected: + if e, ok := err.(ErrRejected); ok { if e.IsSelf() { // Remove the given address from the address book and add to our addresses // to avoid dialing in the future. @@ -637,7 +693,7 @@ func (sw *Switch) addOutboundPeerWithConfig( // retry persistent peers after // any dial error besides IsSelf() - if persistent { + if sw.isPeerPersistentFn()(addr) { go sw.reconnectToPeer(addr) } @@ -719,11 +775,15 @@ func (sw *Switch) addPeer(p Peer) error { } func (sw *Switch) startInitPeer(p Peer) error { + // Add some data to the peer, which is required by reactors. for _, reactor := range sw.reactors { - p = reactor.InitAddPeer(p) + p = reactor.InitPeer(p) } - err := p.Start() // spawn send/recv routines + // Start the peer's send/recv routines. + // Must start it before adding it to the peer set + // to prevent Start and Stop from being called concurrently. 
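	// Aside: this ordering mirrors stopAndRemovePeer, which runs every
	// reactor's RemovePeer before taking the peer out of the set. Together
	// the two rules keep InitPeer from racing a still-running RemovePeer
	// when a peer reconnects.
	// https://github.com/tendermint/tendermint/issues/3338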
+ err := p.Start() if err != nil { // Should never happen sw.Logger.Error("Error starting peer", "err", err, "peer", p) diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 5b49b05a9..000eed2e0 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -12,10 +12,11 @@ import ( "regexp" "strconv" "sync" + "sync/atomic" "testing" "time" - stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -167,7 +168,7 @@ func TestSwitchFiltersOutItself(t *testing.T) { rp.Start() // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr(), false) + err := s1.DialPeerWithAddress(rp.Addr()) if assert.Error(t, err) { if err, ok := err.(ErrRejected); ok { if !err.IsSelf() { @@ -212,6 +213,7 @@ func TestSwitchPeerFilter(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) if err != nil { @@ -256,6 +258,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) if err != nil { @@ -281,6 +284,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) if err != nil { @@ -325,6 +329,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) require.Nil(err) @@ -342,7 +347,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { } func TestSwitchStopPeerForError(t *testing.T) { - s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) + s := httptest.NewServer(promhttp.Handler()) defer s.Close() scrapeMetrics := func() string { @@ -389,52 +394,34 @@ func TestSwitchStopPeerForError(t *testing.T) { assert.EqualValues(t, 0, peersMetricValue()) } -func TestSwitchReconnectsToPersistentPeer(t *testing.T) { - assert, require := assert.New(t), require.New(t) - +func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) err := sw.Start() - if err != nil { - t.Error(err) - } + require.NoError(t, err) defer sw.Stop() - // simulate remote peer + // 1. 
simulate failure by closing connection rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() defer rp.Stop() addr := *rp.Addr() sw.config.PersistentPeers = addr.String() - sw.initPersistentPeers() - p, err := sw.transport.Dial(addr, peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - persistent: true, - reactorsByCh: sw.reactorsByCh, - }) - require.Nil(err) + err = sw.AddPersistentPeers([]string{rp.Addr().String()}) + require.NoError(t, err) - require.Nil(sw.addPeer(p)) + err = sw.DialPeerWithAddress(rp.Addr()) + require.Nil(t, err) + require.NotNil(t, sw.Peers().Get(rp.ID())) - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection + p := sw.Peers().List()[0] p.(*peer).CloseConn() - // TODO: remove sleep, detect the disconnection, wait for reconnect - npeers := sw.Peers().Size() - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - npeers = sw.Peers().Size() - if npeers > 0 { - break - } - } - assert.NotZero(npeers) - assert.False(p.IsRunning()) + waitUntilSwitchHasAtLeastNPeers(sw, 1) + assert.False(t, p.IsRunning()) // old peer instance + assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - // simulate another remote peer + // 2. simulate first time dial failure rp = &remotePeer{ PrivKey: ed25519.GenPrivKey(), Config: cfg, @@ -445,25 +432,69 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { rp.Start() defer rp.Stop() - // simulate first time dial failure conf := config.DefaultP2PConfig() - conf.TestDialFail = true - sw.config.PersistentPeers = fmt.Sprintf("%s,%s", sw.config.PersistentPeers, rp.Addr().String()) - sw.initPersistentPeers() - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true) - require.NotNil(err) + conf.TestDialFail = true // will trigger a reconnect + err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) + require.NotNil(t, err) // DialPeerWithAddres - sw.peerConfig resets the dialer + waitUntilSwitchHasAtLeastNPeers(sw, 2) + assert.Equal(t, 2, sw.Peers().Size()) +} + +func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + // 1. 
simulate failure by closing the connection + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + + err = sw.AddPersistentPeers([]string{rp.Addr().String()}) + require.NoError(t, err) + + conn, err := rp.Dial(sw.NetAddress()) + require.NoError(t, err) + time.Sleep(50 * time.Millisecond) + require.NotNil(t, sw.Peers().Get(rp.ID())) + + conn.Close() + + waitUntilSwitchHasAtLeastNPeers(sw, 1) + assert.Equal(t, 1, sw.Peers().Size()) +} + +func TestSwitchDialPeersAsync(t *testing.T) { + if testing.Short() { + return + } - // TODO: same as above + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + + err = sw.DialPeersAsync([]string{rp.Addr().String()}) + require.NoError(t, err) + time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) + require.NotNil(t, sw.Peers().Get(rp.ID())) +} + +func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { for i := 0; i < 20; i++ { time.Sleep(250 * time.Millisecond) - npeers = sw.Peers().Size() - if npeers > 1 { + has := sw.Peers().Size() + if has >= n { break } } - assert.EqualValues(2, npeers) } func TestSwitchFullConnectivity(t *testing.T) { @@ -575,6 +606,71 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) { }) } +// mockReactor checks that InitPeer never called before RemovePeer. If that's +// not true, InitCalledBeforeRemoveFinished will return true. +type mockReactor struct { + *BaseReactor + + // atomic + removePeerInProgress uint32 + initCalledBeforeRemoveFinished uint32 +} + +func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { + atomic.StoreUint32(&r.removePeerInProgress, 1) + defer atomic.StoreUint32(&r.removePeerInProgress, 0) + time.Sleep(100 * time.Millisecond) +} + +func (r *mockReactor) InitPeer(peer Peer) Peer { + if atomic.LoadUint32(&r.removePeerInProgress) == 1 { + atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) + } + + return peer +} + +func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { + return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 +} + +// see stopAndRemovePeer +func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { + // make reactor + reactor := &mockReactor{} + reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) + + // make switch + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { + sw.AddReactor("mock", reactor) + return sw + }) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + // add peer + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + _, err = rp.Dial(sw.NetAddress()) + require.NoError(t, err) + // wait till the switch adds rp to the peer set + time.Sleep(50 * time.Millisecond) + + // stop peer asynchronously + go sw.StopPeerForError(sw.Peers().Get(rp.ID()), "test") + + // simulate peer reconnecting to us + _, err = rp.Dial(sw.NetAddress()) + require.NoError(t, err) + // wait till the switch adds rp to the peer set + time.Sleep(50 * time.Millisecond) + + // make sure reactor.RemovePeer is finished before InitPeer is called + assert.False(t, reactor.InitCalledBeforeRemoveFinished()) +} + func BenchmarkSwitchBroadcast(b *testing.B) { s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { // Make bar reactors of bar channels each diff --git a/p2p/test_util.go b/p2p/test_util.go index f8020924c..a14073f99 100644 --- 
a/p2p/test_util.go +++ b/p2p/test_util.go @@ -5,6 +5,8 @@ import ( "net" "time" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" @@ -27,7 +29,7 @@ func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, ni func (ni mockNodeInfo) Validate() error { return nil } func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } -func AddPeerToSwitch(sw *Switch, peer Peer) { +func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { sw.peers.Add(peer) } @@ -233,7 +235,7 @@ func testPeerConn( // Encrypt connection conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey) if err != nil { - return pc, cmn.ErrorWrap(err, "Error creating peer") + return pc, errors.Wrap(err, "Error creating peer") } // Only the information we already have diff --git a/p2p/transport.go b/p2p/transport.go index fd5674b85..8fec2ba14 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -39,11 +39,15 @@ type accept struct { // events. // TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour. type peerConfig struct { - chDescs []*conn.ChannelDescriptor - onPeerError func(Peer, interface{}) - outbound, persistent bool - reactorsByCh map[byte]Reactor - metrics *Metrics + chDescs []*conn.ChannelDescriptor + onPeerError func(Peer, interface{}) + outbound bool + // isPersistent allows you to set a function, which, given socket address + // (for outbound peers) OR self-reported address (for inbound peers), tells + // if the peer is persistent or not. + isPersistent func(*NetAddress) bool + reactorsByCh map[byte]Reactor + metrics *Metrics } // Transport emits and connects to Peers. The implementation of Peer is left to @@ -464,9 +468,21 @@ func (mt *MultiplexTransport) wrapPeer( cfg peerConfig, socketAddr *NetAddress, ) Peer { + persistent := false + if cfg.isPersistent != nil { + if cfg.outbound { + persistent = cfg.isPersistent(socketAddr) + } else { + selfReportedAddr, err := ni.NetAddress() + if err == nil { + persistent = cfg.isPersistent(selfReportedAddr) + } + } + } + peerConn := newPeerConn( cfg.outbound, - cfg.persistent, + persistent, c, socketAddr, ) diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 35fd9c66b..7580f0259 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -8,8 +8,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/p2p/conn" ) @@ -39,6 +37,7 @@ func TestTransportMultiplexConnFilter(t *testing.T) { PrivKey: ed25519.GenPrivKey(), }, ) + id := mt.nodeKey.ID() MultiplexTransportConnFilters( func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, @@ -48,7 +47,7 @@ func TestTransportMultiplexConnFilter(t *testing.T) { }, )(mt) - addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0") + addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) if err != nil { t.Fatal(err) } @@ -60,13 +59,9 @@ func TestTransportMultiplexConnFilter(t *testing.T) { errc := make(chan error) go func() { - addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(id, mt.listener.Addr()) - _, err = addr.Dial() + _, err := addr.Dial() if err != nil { errc <- err return @@ -96,6 +91,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { PrivKey: ed25519.GenPrivKey(), }, ) + id := mt.nodeKey.ID() 
MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) MultiplexTransportConnFilters( @@ -105,7 +101,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { }, )(mt) - addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0") + addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) if err != nil { t.Fatal(err) } @@ -117,13 +113,9 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { errc := make(chan error) go func() { - addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(id, mt.listener.Addr()) - _, err = addr.Dial() + _, err := addr.Dial() if err != nil { errc <- err return @@ -144,9 +136,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { func TestTransportMultiplexAcceptMultiple(t *testing.T) { mt := testSetupMultiplexTransport(t) - id, addr := mt.nodeKey.ID(), mt.listener.Addr().String() - laddr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, addr)) - require.NoError(t, err) + laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) var ( seed = rand.New(rand.NewSource(time.Now().UnixNano())) @@ -232,11 +222,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { // Simulate slow Peer. go func() { - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) c, err := addr.Dial() if err != nil { @@ -283,13 +269,9 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { }, ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -329,13 +311,9 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -372,13 +350,9 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { PrivKey: ed25519.GenPrivKey(), }, ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -415,12 +389,9 @@ func TestTransportMultiplexDialRejectWrongID(t *testing.T) { ) wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey()) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(wrongID, mt.listener.Addr().String())) - if err != nil { - t.Fatalf("invalid address with ID: %v", err) - } + addr := NewNetAddress(wrongID, mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { t.Logf("connection failed: %v", err) if err, ok := err.(ErrRejected); ok { @@ -448,13 +419,9 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { }, ) ) - addr, err := 
NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -479,13 +446,9 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { errc := make(chan error) go func() { - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = mt.Dial(*addr, peerConfig{}) + _, err := mt.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -609,7 +572,7 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, "127.0.0.1:0")) + addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) if err != nil { t.Fatal(err) } diff --git a/p2p/trust/store.go b/p2p/trust/store.go index d6b4c049d..fc1ad399e 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -156,7 +156,7 @@ func (tms *TrustMetricStore) loadFromDB() bool { peers := make(map[string]MetricHistoryJSON) err := json.Unmarshal(bytes, &peers) if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) + panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) } // If history data exists in the file, diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index d53974fc4..89f35c5df 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -197,7 +197,7 @@ func localIPv4() (net.IP, error) { } func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) + r, err := http.Get(rootURL) // nolint: gosec if err != nil { return } diff --git a/privval/wire.go b/privval/codec.go similarity index 100% rename from privval/wire.go rename to privval/codec.go diff --git a/privval/doc.go b/privval/doc.go index 80869a6a7..ad60673b6 100644 --- a/privval/doc.go +++ b/privval/doc.go @@ -6,16 +6,16 @@ FilePV FilePV is the simplest implementation and developer default. It uses one file for the private key and another to store state. -SignerValidatorEndpoint +SignerListenerEndpoint -SignerValidatorEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket. -SignerValidatorEndpoint listens for the external KMS process to dial in. -SignerValidatorEndpoint takes a listener, which determines the type of connection +SignerListenerEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket. +SignerListenerEndpoint listens for the external KMS process to dial in. +SignerListenerEndpoint takes a listener, which determines the type of connection (ie. encrypted over tcp, or unencrypted over unix). -SignerServiceEndpoint +SignerDialerEndpoint -SignerServiceEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. +SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. */ package privval diff --git a/privval/errors.go b/privval/errors.go index 75fb25fc6..9f151f11d 100644 --- a/privval/errors.go +++ b/privval/errors.go @@ -4,10 +4,21 @@ import ( "fmt" ) +type EndpointTimeoutError struct{} + +// Implement the net.Error interface. 
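
Because EndpointTimeoutError implements net.Error (methods below), callers can detect a signer timeout with the standard interface assertion instead of depending on the concrete privval type. A small standalone sketch of that check, with timeoutError standing in for EndpointTimeoutError:

package main

import (
	"fmt"
	"net"
)

// timeoutError is a stand-in for privval.EndpointTimeoutError: it satisfies
// net.Error by reporting itself as a temporary timeout.
type timeoutError struct{}

func (timeoutError) Error() string   { return "endpoint connection timed out" }
func (timeoutError) Timeout() bool   { return true }
func (timeoutError) Temporary() bool { return true }

func main() {
	var err error = timeoutError{}

	// Generic check: any net.Error reporting Timeout() can be retried
	// without referencing the concrete type.
	if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
		fmt.Println("timed out, safe to retry:", nerr)
	}
}

This is also why ErrConnectionTimeout is now a value of EndpointTimeoutError rather than a plain fmt.Errorf error.
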
+func (e EndpointTimeoutError) Error() string { return "endpoint connection timed out" }
+func (e EndpointTimeoutError) Timeout() bool { return true }
+func (e EndpointTimeoutError) Temporary() bool { return true }
+
 // Socket errors.
 var (
 	ErrUnexpectedResponse = fmt.Errorf("received unexpected response")
-	ErrConnTimeout        = fmt.Errorf("remote signer timed out")
+	ErrNoConnection       = fmt.Errorf("endpoint is not connected")
+	ErrConnectionTimeout  = EndpointTimeoutError{}
+
+	ErrReadTimeout  = fmt.Errorf("endpoint read timed out")
+	ErrWriteTimeout = fmt.Errorf("endpoint write timed out")
 )
 
 // RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply.
@@ -18,5 +29,5 @@ type RemoteSignerError struct {
 }
 
 func (e *RemoteSignerError) Error() string {
-	return fmt.Sprintf("signerServiceEndpoint returned error #%d: %s", e.Code, e.Description)
+	return fmt.Sprintf("signerEndpoint returned error #%d: %s", e.Code, e.Description)
 }
diff --git a/privval/file_deprecated_test.go b/privval/file_deprecated_test.go
index 46391a3fe..e678bfc09 100644
--- a/privval/file_deprecated_test.go
+++ b/privval/file_deprecated_test.go
@@ -67,11 +67,11 @@ func assertEqualPV(t *testing.T, oldPV *privval.OldFilePV, newPV *privval.FilePV
 }
 
 func initTmpOldFile(t *testing.T) string {
-	tmpfile, err := ioutil.TempFile("", "priv_validator_*.json")
+	tmpFile, err := ioutil.TempFile("", "priv_validator_*.json")
 	require.NoError(t, err)
-	t.Logf("created test file %s", tmpfile.Name())
-	_, err = tmpfile.WriteString(oldPrivvalContent)
+	t.Logf("created test file %s", tmpFile.Name())
+	_, err = tmpFile.WriteString(oldPrivvalContent)
 	require.NoError(t, err)
 
-	return tmpfile.Name()
+	return tmpFile.Name()
 }
diff --git a/privval/file_test.go b/privval/file_test.go
index 06d75a809..38f6e6fe3 100644
--- a/privval/file_test.go
+++ b/privval/file_test.go
@@ -50,7 +50,7 @@ func TestResetValidator(t *testing.T) {
 	// test vote
 	height, round := int64(10), 1
 	voteType := byte(types.PrevoteType)
-	blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
+	blockID := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}}
 	vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID)
 	err = privVal.SignVote("mychainid", vote)
 	assert.NoError(t, err, "expected no error signing vote")
@@ -58,7 +58,7 @@
 	// priv val after signing is not same as empty
 	assert.NotEqual(t, privVal.LastSignState, emptyState)
 
-	// priv val after reset is same as empty
+	// priv val after Reset is same as empty
 	privVal.Reset()
 	assert.Equal(t, privVal.LastSignState, emptyState)
 }
@@ -162,8 +162,9 @@ func TestSignVote(t *testing.T) {
 
 	privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
 
-	block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
-	block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}}
+	block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}}
+	block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{}}
+
 	height, round := int64(10), 1
 	voteType := byte(types.PrevoteType)
@@ -207,8 +208,8 @@ func TestSignProposal(t *testing.T) {
 
 	privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
 
-	block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}}
-	block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{10, []byte{3, 2, 1}}}
+	block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{Total: 5, Hash: []byte{1, 2,
3}}} + block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{Total: 10, Hash: []byte{3, 2, 1}}} height, round := int64(10), 1 // sign a proposal for first time @@ -249,7 +250,7 @@ func TestDifferByTimestamp(t *testing.T) { privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}} + block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{Total: 5, Hash: []byte{1, 2, 3}}} height, round := int64(10), 1 chainID := "mychainid" @@ -277,7 +278,7 @@ func TestDifferByTimestamp(t *testing.T) { // test vote { voteType := byte(types.PrevoteType) - blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} + blockID := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) err := privVal.SignVote("mychainid", vote) assert.NoError(t, err, "expected no error signing vote") diff --git a/privval/messages.go b/privval/messages.go index 6774a2795..c172a5ea1 100644 --- a/privval/messages.go +++ b/privval/messages.go @@ -6,56 +6,59 @@ import ( "github.com/tendermint/tendermint/types" ) -// RemoteSignerMsg is sent between SignerServiceEndpoint and the SignerServiceEndpoint client. -type RemoteSignerMsg interface{} +// SignerMessage is sent between Signer Clients and Servers. +type SignerMessage interface{} func RegisterRemoteSignerMsg(cdc *amino.Codec) { - cdc.RegisterInterface((*RemoteSignerMsg)(nil), nil) + cdc.RegisterInterface((*SignerMessage)(nil), nil) cdc.RegisterConcrete(&PubKeyRequest{}, "tendermint/remotesigner/PubKeyRequest", nil) cdc.RegisterConcrete(&PubKeyResponse{}, "tendermint/remotesigner/PubKeyResponse", nil) cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil) cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil) cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil) cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil) + cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil) cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil) } +// TODO: Add ChainIDRequest + // PubKeyRequest requests the consensus public key from the remote signer. type PubKeyRequest struct{} -// PubKeyResponse is a PrivValidatorSocket message containing the public key. +// PubKeyResponse is a response message containing the public key. type PubKeyResponse struct { PubKey crypto.PubKey Error *RemoteSignerError } -// SignVoteRequest is a PrivValidatorSocket message containing a vote. +// SignVoteRequest is a request to sign a vote type SignVoteRequest struct { Vote *types.Vote } -// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potenial error message. +// SignedVoteResponse is a response containing a signed vote or an error type SignedVoteResponse struct { Vote *types.Vote Error *RemoteSignerError } -// SignProposalRequest is a PrivValidatorSocket message containing a Proposal. 
+// SignProposalRequest is a request to sign a proposal type SignProposalRequest struct { Proposal *types.Proposal } -// SignedProposalResponse is a PrivValidatorSocket message containing a proposal response +// SignedProposalResponse is response containing a signed proposal or an error type SignedProposalResponse struct { Proposal *types.Proposal Error *RemoteSignerError } -// PingRequest is a PrivValidatorSocket message to keep the connection alive. +// PingRequest is a request to confirm that the connection is alive. type PingRequest struct { } -// PingRequest is a PrivValidatorSocket response to keep the connection alive. +// PingResponse is a response to confirm that the connection is alive. type PingResponse struct { } diff --git a/privval/signer_client.go b/privval/signer_client.go new file mode 100644 index 000000000..0885ee4aa --- /dev/null +++ b/privval/signer_client.go @@ -0,0 +1,131 @@ +package privval + +import ( + "time" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" +) + +// SignerClient implements PrivValidator. +// Handles remote validator connections that provide signing services +type SignerClient struct { + endpoint *SignerListenerEndpoint +} + +var _ types.PrivValidator = (*SignerClient)(nil) + +// NewSignerClient returns an instance of SignerClient. +// it will start the endpoint (if not already started) +func NewSignerClient(endpoint *SignerListenerEndpoint) (*SignerClient, error) { + if !endpoint.IsRunning() { + if err := endpoint.Start(); err != nil { + return nil, errors.Wrap(err, "failed to start listener endpoint") + } + } + + return &SignerClient{endpoint: endpoint}, nil +} + +// Close closes the underlying connection +func (sc *SignerClient) Close() error { + return sc.endpoint.Close() +} + +// IsConnected indicates with the signer is connected to a remote signing service +func (sc *SignerClient) IsConnected() bool { + return sc.endpoint.IsConnected() +} + +// WaitForConnection waits maxWait for a connection or returns a timeout error +func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error { + return sc.endpoint.WaitForConnection(maxWait) +} + +//-------------------------------------------------------- +// Implement PrivValidator + +// Ping sends a ping request to the remote signer +func (sc *SignerClient) Ping() error { + response, err := sc.endpoint.SendRequest(&PingRequest{}) + + if err != nil { + sc.endpoint.Logger.Error("SignerClient::Ping", "err", err) + return nil + } + + _, ok := response.(*PingResponse) + if !ok { + sc.endpoint.Logger.Error("SignerClient::Ping", "err", "response != PingResponse") + return err + } + + return nil +} + +// GetPubKey retrieves a public key from a remote signer +func (sc *SignerClient) GetPubKey() crypto.PubKey { + response, err := sc.endpoint.SendRequest(&PubKeyRequest{}) + if err != nil { + sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", err) + return nil + } + + pubKeyResp, ok := response.(*PubKeyResponse) + if !ok { + sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != PubKeyResponse") + return nil + } + + if pubKeyResp.Error != nil { + sc.endpoint.Logger.Error("failed to get private validator's public key", "err", pubKeyResp.Error) + return nil + } + + return pubKeyResp.PubKey +} + +// SignVote requests a remote signer to sign a vote +func (sc *SignerClient) SignVote(chainID string, vote *types.Vote) error { + response, err := sc.endpoint.SendRequest(&SignVoteRequest{Vote: vote}) + if err != nil { + 
sc.endpoint.Logger.Error("SignerClient::SignVote", "err", err) + return err + } + + resp, ok := response.(*SignedVoteResponse) + if !ok { + sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != SignedVoteResponse") + return ErrUnexpectedResponse + } + + if resp.Error != nil { + return resp.Error + } + *vote = *resp.Vote + + return nil +} + +// SignProposal requests a remote signer to sign a proposal +func (sc *SignerClient) SignProposal(chainID string, proposal *types.Proposal) error { + response, err := sc.endpoint.SendRequest(&SignProposalRequest{Proposal: proposal}) + if err != nil { + sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", err) + return err + } + + resp, ok := response.(*SignedProposalResponse) + if !ok { + sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", "response != SignedProposalResponse") + return ErrUnexpectedResponse + } + if resp.Error != nil { + return resp.Error + } + *proposal = *resp.Proposal + + return nil +} diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go new file mode 100644 index 000000000..3d7cfb3e0 --- /dev/null +++ b/privval/signer_client_test.go @@ -0,0 +1,257 @@ +package privval + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" +) + +type signerTestCase struct { + chainID string + mockPV types.PrivValidator + signerClient *SignerClient + signerServer *SignerServer +} + +func getSignerTestCases(t *testing.T) []signerTestCase { + testCases := make([]signerTestCase, 0) + + // Get test cases for each possible dialer (DialTCP / DialUnix / etc) + for _, dtc := range getDialerTestCases(t) { + chainID := common.RandStr(12) + mockPV := types.NewMockPV() + + // get a pair of signer listener, signer dialer endpoints + sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) + sc, err := NewSignerClient(sl) + require.NoError(t, err) + ss := NewSignerServer(sd, chainID, mockPV) + + err = ss.Start() + require.NoError(t, err) + + tc := signerTestCase{ + chainID: chainID, + mockPV: mockPV, + signerClient: sc, + signerServer: ss, + } + + testCases = append(testCases, tc) + } + + return testCases +} + +func TestSignerClose(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + err := tc.signerClient.Close() + assert.NoError(t, err) + + err = tc.signerServer.Stop() + assert.NoError(t, err) + } +} + +func TestSignerPing(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + err := tc.signerClient.Ping() + assert.NoError(t, err) + } +} + +func TestSignerGetPubKey(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + pubKey := tc.signerClient.GetPubKey() + expectedPubKey := tc.mockPV.GetPubKey() + + assert.Equal(t, expectedPubKey, pubKey) + + addr := tc.signerClient.GetPubKey().Address() + expectedAddr := tc.mockPV.GetPubKey().Address() + + assert.Equal(t, expectedAddr, addr) + } +} + +func TestSignerProposal(t *testing.T) { + for _, tc := range getSignerTestCases(t) { + ts := time.Now() + want := &types.Proposal{Timestamp: ts} + have := &types.Proposal{Timestamp: ts} + + defer tc.signerServer.Stop() + defer tc.signerClient.Close() + + require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want)) + require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have)) + + assert.Equal(t, want.Signature, 
diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go
new file mode 100644
index 000000000..3d7cfb3e0
--- /dev/null
+++ b/privval/signer_client_test.go
@@ -0,0 +1,257 @@
+package privval
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/types"
+)
+
+type signerTestCase struct {
+	chainID      string
+	mockPV       types.PrivValidator
+	signerClient *SignerClient
+	signerServer *SignerServer
+}
+
+func getSignerTestCases(t *testing.T) []signerTestCase {
+	testCases := make([]signerTestCase, 0)
+
+	// Get test cases for each possible dialer (DialTCP / DialUnix / etc)
+	for _, dtc := range getDialerTestCases(t) {
+		chainID := common.RandStr(12)
+		mockPV := types.NewMockPV()
+
+		// get a pair of endpoints: a signer listener and a signer dialer
+		sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer)
+		sc, err := NewSignerClient(sl)
+		require.NoError(t, err)
+		ss := NewSignerServer(sd, chainID, mockPV)
+
+		err = ss.Start()
+		require.NoError(t, err)
+
+		tc := signerTestCase{
+			chainID:      chainID,
+			mockPV:       mockPV,
+			signerClient: sc,
+			signerServer: ss,
+		}
+
+		testCases = append(testCases, tc)
+	}
+
+	return testCases
+}
+
+func TestSignerClose(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		err := tc.signerClient.Close()
+		assert.NoError(t, err)
+
+		err = tc.signerServer.Stop()
+		assert.NoError(t, err)
+	}
+}
+
+func TestSignerPing(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		err := tc.signerClient.Ping()
+		assert.NoError(t, err)
+	}
+}
+
+func TestSignerGetPubKey(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		pubKey := tc.signerClient.GetPubKey()
+		expectedPubKey := tc.mockPV.GetPubKey()
+
+		assert.Equal(t, expectedPubKey, pubKey)
+
+		addr := tc.signerClient.GetPubKey().Address()
+		expectedAddr := tc.mockPV.GetPubKey().Address()
+
+		assert.Equal(t, expectedAddr, addr)
+	}
+}
+
+func TestSignerProposal(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		ts := time.Now()
+		want := &types.Proposal{Timestamp: ts}
+		have := &types.Proposal{Timestamp: ts}
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want))
+		require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have))
+
+		assert.Equal(t, want.Signature, have.Signature)
+	}
+}
+
+func TestSignerVote(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		ts := time.Now()
+		want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+		have := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		require.NoError(t, tc.mockPV.SignVote(tc.chainID, want))
+		require.NoError(t, tc.signerClient.SignVote(tc.chainID, have))
+
+		assert.Equal(t, want.Signature, have.Signature)
+	}
+}
+
+func TestSignerVoteResetDeadline(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		ts := time.Now()
+		want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+		have := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		time.Sleep(testTimeoutReadWrite2o3)
+
+		require.NoError(t, tc.mockPV.SignVote(tc.chainID, want))
+		require.NoError(t, tc.signerClient.SignVote(tc.chainID, have))
+		assert.Equal(t, want.Signature, have.Signature)
+
+		// TODO(jleni): Clarify what is actually being tested
+
+		// This would exceed the deadline if it was not extended by the previous message
+		time.Sleep(testTimeoutReadWrite2o3)
+
+		require.NoError(t, tc.mockPV.SignVote(tc.chainID, want))
+		require.NoError(t, tc.signerClient.SignVote(tc.chainID, have))
+		assert.Equal(t, want.Signature, have.Signature)
+	}
+}
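// Editorial aside (not part of the diff): the two 2/3-timeout sleeps above only
// pass because every exchange resets the connection deadline, mirroring what
// signerEndpoint does before each read/write. The same behavior in isolation,
// on a plain net.Conn:
//
//	a, b := net.Pipe()                     // in-memory duplex connection
//	go func() {                            // echo peer
//		buf := make([]byte, 1)
//		for {
//			if _, err := b.Read(buf); err != nil {
//				return
//			}
//			b.Write(buf)
//		}
//	}()
//	timeout := 100 * time.Millisecond
//	buf := make([]byte, 1)
//	for i := 0; i < 2; i++ {
//		time.Sleep(60 * time.Millisecond)      // ~2/3 of the window, twice
//		a.SetDeadline(time.Now().Add(timeout)) // reset, as the endpoint does
//		a.Write([]byte("x"))
//		a.Read(buf) // without the reset, the second pass would time out
//	}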
+
+func TestSignerVoteKeepAlive(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		ts := time.Now()
+		want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+		have := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		// Check that even if the client does not request a
+		// signature for a long time, the service is still available.
+
+		// in this particular case, we use the dialer logger to ensure that
+		// test messages are properly interleaved in the test logs
+		tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------")
+		time.Sleep(testTimeoutReadWrite * 3)
+		tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------")
+
+		require.NoError(t, tc.mockPV.SignVote(tc.chainID, want))
+		require.NoError(t, tc.signerClient.SignVote(tc.chainID, have))
+
+		assert.Equal(t, want.Signature, have.Signature)
+	}
+}
+
+func TestSignerSignProposalErrors(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		// Replace service with a mock that always fails
+		tc.signerServer.privVal = types.NewErroringMockPV()
+		tc.mockPV = types.NewErroringMockPV()
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		ts := time.Now()
+		proposal := &types.Proposal{Timestamp: ts}
+		err := tc.signerClient.SignProposal(tc.chainID, proposal)
+		require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())
+
+		err = tc.mockPV.SignProposal(tc.chainID, proposal)
+		require.Error(t, err)
+
+		err = tc.signerClient.SignProposal(tc.chainID, proposal)
+		require.Error(t, err)
+	}
+}
+
+func TestSignerSignVoteErrors(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		ts := time.Now()
+		vote := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+
+		// Replace signer service privval with one that always fails
+		tc.signerServer.privVal = types.NewErroringMockPV()
+		tc.mockPV = types.NewErroringMockPV()
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		err := tc.signerClient.SignVote(tc.chainID, vote)
+		require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())
+
+		err = tc.mockPV.SignVote(tc.chainID, vote)
+		require.Error(t, err)
+
+		err = tc.signerClient.SignVote(tc.chainID, vote)
+		require.Error(t, err)
+	}
+}
+
+func brokenHandler(privVal types.PrivValidator, request SignerMessage, chainID string) (SignerMessage, error) {
+	var res SignerMessage
+	var err error
+
+	switch r := request.(type) {
+
+	// This is broken and will answer most requests with a pubkey response
+	case *PubKeyRequest:
+		res = &PubKeyResponse{nil, nil}
+	case *SignVoteRequest:
+		res = &PubKeyResponse{nil, nil}
+	case *SignProposalRequest:
+		res = &PubKeyResponse{nil, nil}
+
+	case *PingRequest:
+		err, res = nil, &PingResponse{}
+
+	default:
+		err = fmt.Errorf("unknown msg: %v", r)
+	}
+
+	return res, err
+}
+
+func TestSignerUnexpectedResponse(t *testing.T) {
+	for _, tc := range getSignerTestCases(t) {
+		tc.signerServer.privVal = types.NewMockPV()
+		tc.mockPV = types.NewMockPV()
+
+		tc.signerServer.SetRequestHandler(brokenHandler)
+
+		defer tc.signerServer.Stop()
+		defer tc.signerClient.Close()
+
+		ts := time.Now()
+		want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
+
+		e := tc.signerClient.SignVote(tc.chainID, want)
+		assert.EqualError(t, e, "received unexpected response")
+	}
+}
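Editorial aside (not part of this diff): SetRequestHandler, exercised with brokenHandler above, is a general extension point. A sketch of a custom handler in the same package, reusing the diff's ValidationRequestHandlerFunc signature and message types; voteOnlyHandler is hypothetical, not code from this PR:

    // voteOnlyHandler refuses proposals but delegates everything else to the
    // default handler.
    func voteOnlyHandler(privVal types.PrivValidator, req SignerMessage, chainID string) (SignerMessage, error) {
        if _, ok := req.(*SignProposalRequest); ok {
            // Reply with a RemoteSignerError instead of signing.
            return &SignedProposalResponse{nil, &RemoteSignerError{0, "proposals disabled on this signer"}}, nil
        }
        return DefaultValidationRequestHandler(privVal, req, chainID)
    }

    // wiring:
    //   ss := NewSignerServer(endpoint, chainID, privVal)
    //   ss.SetRequestHandler(voteOnlyHandler)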
diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go
new file mode 100644
index 000000000..95094c6d0
--- /dev/null
+++ b/privval/signer_dialer_endpoint.go
@@ -0,0 +1,84 @@
+package privval
+
+import (
+	"time"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+const (
+	defaultMaxDialRetries        = 10
+	defaultRetryWaitMilliseconds = 100
+)
+
+// SignerServiceEndpointOption sets an optional parameter on the SignerDialerEndpoint.
+type SignerServiceEndpointOption func(*SignerDialerEndpoint)
+
+// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for connections
+// from external signing processes.
+func SignerDialerEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption {
+	return func(ss *SignerDialerEndpoint) { ss.timeoutReadWrite = timeout }
+}
+
+// SignerDialerEndpointConnRetries sets the maximum number of connection attempts before giving up.
+func SignerDialerEndpointConnRetries(retries int) SignerServiceEndpointOption {
+	return func(ss *SignerDialerEndpoint) { ss.maxConnRetries = retries }
+}
+
+// SignerDialerEndpoint dials using its dialer and provides the connection
+// over which a SignerServer services signature requests.
+type SignerDialerEndpoint struct {
+	signerEndpoint
+
+	dialer SocketDialer
+
+	retryWait      time.Duration
+	maxConnRetries int
+}
+
+// NewSignerDialerEndpoint returns a SignerDialerEndpoint that will dial using the given
+// dialer; signature requests received over the resulting connection are
+// serviced by the SignerServer that owns this endpoint.
+func NewSignerDialerEndpoint(
+	logger log.Logger,
+	dialer SocketDialer,
+) *SignerDialerEndpoint {
+
+	sd := &SignerDialerEndpoint{
+		dialer:         dialer,
+		retryWait:      defaultRetryWaitMilliseconds * time.Millisecond,
+		maxConnRetries: defaultMaxDialRetries,
+	}
+
+	sd.BaseService = *cmn.NewBaseService(logger, "SignerDialerEndpoint", sd)
+	sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second
+
+	return sd
+}
+
+func (sd *SignerDialerEndpoint) ensureConnection() error {
+	if sd.IsConnected() {
+		return nil
+	}
+
+	retries := 0
+	for retries < sd.maxConnRetries {
+		conn, err := sd.dialer()
+
+		if err != nil {
+			retries++
+			sd.Logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err)
+			// Wait between retries
+			time.Sleep(sd.retryWait)
+		} else {
+			sd.SetConnection(conn)
+			sd.Logger.Debug("SignerDialer: Connection Ready")
+			return nil
+		}
+	}
+
+	sd.Logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries)
+
+	return ErrNoConnection
+}
diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go
new file mode 100644
index 000000000..425f73fea
--- /dev/null
+++ b/privval/signer_endpoint.go
@@ -0,0 +1,156 @@
+package privval
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+const (
+	defaultTimeoutReadWriteSeconds = 3
+)
+
+type signerEndpoint struct {
+	cmn.BaseService
+
+	connMtx sync.Mutex
+	conn    net.Conn
+
+	timeoutReadWrite time.Duration
+}
+
+// Close closes the underlying net.Conn.
+func (se *signerEndpoint) Close() error {
+	se.DropConnection()
+	return nil
+}
+
+// IsConnected indicates if there is an active connection
+func (se *signerEndpoint) IsConnected() bool {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+	return se.isConnected()
+}
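// Editorial aside (not part of the diff): the locking contract in this file is
// that exported methods (IsConnected, ReadMessage, WriteMessage, ...) take
// connMtx themselves, while the lowercase isConnected/dropConnection assume the
// caller already holds it. A hypothetical same-package helper that round-trips
// a ping under that contract:
//
//	func pingOnce(se *signerEndpoint) (SignerMessage, error) {
//		if !se.IsConnected() { // locks connMtx internally
//			return nil, ErrNoConnection
//		}
//		if err := se.WriteMessage(&PingRequest{}); err != nil { // resets the write deadline
//			return nil, err
//		}
//		return se.ReadMessage() // resets the read deadline
//	}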
+
+// GetAvailableConnection returns true if a connection is immediately
+// available, without blocking.
+func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net.Conn) bool {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+
+	// Is there a connection ready?
+	select {
+	case se.conn = <-connectionAvailableCh:
+		return true
+	default:
+	}
+	return false
+}
+
+// WaitConnection waits up to maxWait for a connection to become available;
+// otherwise it returns ErrConnectionTimeout.
+func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, maxWait time.Duration) error {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+
+	select {
+	case se.conn = <-connectionAvailableCh:
+	case <-time.After(maxWait):
+		return ErrConnectionTimeout
+	}
+
+	return nil
+}
+
+// SetConnection replaces the current connection object
+func (se *signerEndpoint) SetConnection(newConnection net.Conn) {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+	se.conn = newConnection
+}
+
+// DropConnection closes and discards the current connection, if any
+func (se *signerEndpoint) DropConnection() {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+	se.dropConnection()
+}
+
+// ReadMessage reads a message from the endpoint
+func (se *signerEndpoint) ReadMessage() (msg SignerMessage, err error) {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+
+	if !se.isConnected() {
+		return nil, fmt.Errorf("endpoint is not connected")
+	}
+
+	// Reset read deadline
+	deadline := time.Now().Add(se.timeoutReadWrite)
+
+	err = se.conn.SetReadDeadline(deadline)
+	if err != nil {
+		return
+	}
+
+	const maxRemoteSignerMsgSize = 1024 * 10
+	_, err = cdc.UnmarshalBinaryLengthPrefixedReader(se.conn, &msg, maxRemoteSignerMsgSize)
+	if _, ok := err.(timeoutError); ok {
+		if err != nil {
+			err = errors.Wrap(ErrReadTimeout, err.Error())
+		} else {
+			err = errors.Wrap(ErrReadTimeout, "Empty error")
+		}
+		se.Logger.Debug("Dropping [read]", "obj", se)
+		se.dropConnection()
+	}
+
+	return
+}
+
+// WriteMessage writes a message to the endpoint
+func (se *signerEndpoint) WriteMessage(msg SignerMessage) (err error) {
+	se.connMtx.Lock()
+	defer se.connMtx.Unlock()
+
+	if !se.isConnected() {
+		return errors.Wrap(ErrNoConnection, "endpoint is not connected")
+	}
+
+	// Reset write deadline
+	deadline := time.Now().Add(se.timeoutReadWrite)
+	se.Logger.Debug("Write::Resetting deadline", "obj", se)
+
+	err = se.conn.SetWriteDeadline(deadline)
+	if err != nil {
+		return
+	}
+
+	_, err = cdc.MarshalBinaryLengthPrefixedWriter(se.conn, msg)
+	if _, ok := err.(timeoutError); ok {
+		if err != nil {
+			err = errors.Wrap(ErrWriteTimeout, err.Error())
+		} else {
+			err = errors.Wrap(ErrWriteTimeout, "Empty error")
+		}
+		se.dropConnection()
+	}
+
+	return
+}
+
+func (se *signerEndpoint) isConnected() bool {
+	return se.conn != nil
+}
+
+func (se *signerEndpoint) dropConnection() {
+	if se.conn != nil {
+		if err := se.conn.Close(); err != nil {
+			se.Logger.Error("signerEndpoint::dropConnection", "err", err)
+		}
+		se.conn = nil
+	}
+}
diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go
new file mode 100644
index 000000000..e25f18756
--- /dev/null
+++ b/privval/signer_listener_endpoint.go
@@ -0,0 +1,198 @@
+package privval
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+// SignerValidatorEndpointOption sets an optional parameter on the SignerListenerEndpoint.
+type SignerValidatorEndpointOption func(*SignerListenerEndpoint) + +// SignerListenerEndpoint listens for an external process to dial in +// and keeps the connection alive by dropping and reconnecting +type SignerListenerEndpoint struct { + signerEndpoint + + listener net.Listener + connectRequestCh chan struct{} + connectionAvailableCh chan net.Conn + + timeoutAccept time.Duration + pingTimer *time.Ticker + + instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest +} + +// NewSignerListenerEndpoint returns an instance of SignerListenerEndpoint. +func NewSignerListenerEndpoint( + logger log.Logger, + listener net.Listener, +) *SignerListenerEndpoint { + sc := &SignerListenerEndpoint{ + listener: listener, + timeoutAccept: defaultTimeoutAcceptSeconds * time.Second, + } + + sc.BaseService = *cmn.NewBaseService(logger, "SignerListenerEndpoint", sc) + sc.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + return sc +} + +// OnStart implements cmn.Service. +func (sl *SignerListenerEndpoint) OnStart() error { + sl.connectRequestCh = make(chan struct{}) + sl.connectionAvailableCh = make(chan net.Conn) + + sl.pingTimer = time.NewTicker(defaultPingPeriodMilliseconds * time.Millisecond) + + go sl.serviceLoop() + go sl.pingLoop() + + sl.connectRequestCh <- struct{}{} + + return nil +} + +// OnStop implements cmn.Service +func (sl *SignerListenerEndpoint) OnStop() { + sl.instanceMtx.Lock() + defer sl.instanceMtx.Unlock() + _ = sl.Close() + + // Stop listening + if sl.listener != nil { + if err := sl.listener.Close(); err != nil { + sl.Logger.Error("Closing Listener", "err", err) + sl.listener = nil + } + } + + sl.pingTimer.Stop() +} + +// WaitForConnection waits maxWait for a connection or returns a timeout error +func (sl *SignerListenerEndpoint) WaitForConnection(maxWait time.Duration) error { + sl.instanceMtx.Lock() + defer sl.instanceMtx.Unlock() + return sl.ensureConnection(maxWait) +} + +// SendRequest ensures there is a connection, sends a request and waits for a response +func (sl *SignerListenerEndpoint) SendRequest(request SignerMessage) (SignerMessage, error) { + sl.instanceMtx.Lock() + defer sl.instanceMtx.Unlock() + + err := sl.ensureConnection(sl.timeoutAccept) + if err != nil { + return nil, err + } + + err = sl.WriteMessage(request) + if err != nil { + return nil, err + } + + res, err := sl.ReadMessage() + if err != nil { + return nil, err + } + + return res, nil +} + +func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error { + if sl.IsConnected() { + return nil + } + + // Is there a connection ready? 
then use it + if sl.GetAvailableConnection(sl.connectionAvailableCh) { + return nil + } + + // block until connected or timeout + sl.triggerConnect() + err := sl.WaitConnection(sl.connectionAvailableCh, maxWait) + if err != nil { + return err + } + + return nil +} + +func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { + if !sl.IsRunning() || sl.listener == nil { + return nil, fmt.Errorf("endpoint is closing") + } + + // wait for a new conn + sl.Logger.Info("SignerListener: Listening for new connection") + conn, err := sl.listener.Accept() + if err != nil { + return nil, err + } + + return conn, nil +} + +func (sl *SignerListenerEndpoint) triggerConnect() { + select { + case sl.connectRequestCh <- struct{}{}: + default: + } +} + +func (sl *SignerListenerEndpoint) triggerReconnect() { + sl.DropConnection() + sl.triggerConnect() +} + +func (sl *SignerListenerEndpoint) serviceLoop() { + for { + select { + case <-sl.connectRequestCh: + { + conn, err := sl.acceptNewConnection() + if err == nil { + sl.Logger.Info("SignerListener: Connected") + + // We have a good connection, wait for someone that needs one otherwise cancellation + select { + case sl.connectionAvailableCh <- conn: + case <-sl.Quit(): + return + } + } + + select { + case sl.connectRequestCh <- struct{}{}: + default: + } + } + case <-sl.Quit(): + return + } + } +} + +func (sl *SignerListenerEndpoint) pingLoop() { + for { + select { + case <-sl.pingTimer.C: + { + _, err := sl.SendRequest(&PingRequest{}) + if err != nil { + sl.Logger.Error("SignerListener: Ping timeout") + sl.triggerReconnect() + } + } + case <-sl.Quit(): + return + } + } +} diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go new file mode 100644 index 000000000..7058ff8b8 --- /dev/null +++ b/privval/signer_listener_endpoint_test.go @@ -0,0 +1,198 @@ +package privval + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +var ( + testTimeoutAccept = defaultTimeoutAcceptSeconds * time.Second + + testTimeoutReadWrite = 100 * time.Millisecond + testTimeoutReadWrite2o3 = 60 * time.Millisecond // 2/3 of the other one +) + +type dialerTestCase struct { + addr string + dialer SocketDialer +} + +// TestSignerRemoteRetryTCPOnly will test connection retry attempts over TCP. We +// don't need this for Unix sockets because the OS instantly knows the state of +// both ends of the socket connection. This basically causes the +// SignerDialerEndpoint.dialer() call inside SignerDialerEndpoint.acceptNewConnection() to return +// successfully immediately, putting an instant stop to any retry attempts. 
+func TestSignerRemoteRetryTCPOnly(t *testing.T) { + var ( + attemptCh = make(chan int) + retries = 10 + ) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + // Continuously Accept connection and close {attempts} times + go func(ln net.Listener, attemptCh chan<- int) { + attempts := 0 + for { + conn, err := ln.Accept() + require.NoError(t, err) + + err = conn.Close() + require.NoError(t, err) + + attempts++ + + if attempts == retries { + attemptCh <- attempts + break + } + } + }(ln, attemptCh) + + dialerEndpoint := NewSignerDialerEndpoint( + log.TestingLogger(), + DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()), + ) + SignerDialerEndpointTimeoutReadWrite(time.Millisecond)(dialerEndpoint) + SignerDialerEndpointConnRetries(retries)(dialerEndpoint) + + chainId := cmn.RandStr(12) + mockPV := types.NewMockPV() + signerServer := NewSignerServer(dialerEndpoint, chainId, mockPV) + + err = signerServer.Start() + require.NoError(t, err) + defer signerServer.Stop() + + select { + case attempts := <-attemptCh: + assert.Equal(t, retries, attempts) + case <-time.After(1500 * time.Millisecond): + t.Error("expected remote to observe connection attempts") + } +} + +func TestRetryConnToRemoteSigner(t *testing.T) { + for _, tc := range getDialerTestCases(t) { + var ( + logger = log.TestingLogger() + chainID = cmn.RandStr(12) + mockPV = types.NewMockPV() + endpointIsOpenCh = make(chan struct{}) + thisConnTimeout = testTimeoutReadWrite + listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout) + ) + + dialerEndpoint := NewSignerDialerEndpoint( + logger, + tc.dialer, + ) + SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) + SignerDialerEndpointConnRetries(10)(dialerEndpoint) + + signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) + + startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + defer listenerEndpoint.Stop() + + require.NoError(t, signerServer.Start()) + assert.True(t, signerServer.IsRunning()) + <-endpointIsOpenCh + signerServer.Stop() + + dialerEndpoint2 := NewSignerDialerEndpoint( + logger, + tc.dialer, + ) + signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) + + // let some pings pass + require.NoError(t, signerServer2.Start()) + assert.True(t, signerServer2.IsRunning()) + defer signerServer2.Stop() + + // give the client some time to re-establish the conn to the remote signer + // should see sth like this in the logs: + // + // E[10016-01-10|17:12:46.128] Ping err="remote signer timed out" + // I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal + time.Sleep(testTimeoutReadWrite * 2) + } +} + +/////////////////////////////////// + +func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { + proto, address := cmn.ProtocolAndAddress(addr) + + ln, err := net.Listen(proto, address) + logger.Info("SignerListener: Listening", "proto", proto, "address", address) + if err != nil { + panic(err) + } + + var listener net.Listener + + if proto == "unix" { + unixLn := NewUnixListener(ln) + UnixListenerTimeoutAccept(testTimeoutAccept)(unixLn) + UnixListenerTimeoutReadWrite(timeoutReadWrite)(unixLn) + listener = unixLn + } else { + tcpLn := NewTCPListener(ln, ed25519.GenPrivKey()) + TCPListenerTimeoutAccept(testTimeoutAccept)(tcpLn) + TCPListenerTimeoutReadWrite(timeoutReadWrite)(tcpLn) + listener = tcpLn + } + + return NewSignerListenerEndpoint(logger, listener) +} + +func 
startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { + go func(sle *SignerListenerEndpoint) { + require.NoError(t, sle.Start()) + assert.True(t, sle.IsRunning()) + close(endpointIsOpenCh) + }(sle) +} + +func getMockEndpoints( + t *testing.T, + addr string, + socketDialer SocketDialer, +) (*SignerListenerEndpoint, *SignerDialerEndpoint) { + + var ( + logger = log.TestingLogger() + endpointIsOpenCh = make(chan struct{}) + + dialerEndpoint = NewSignerDialerEndpoint( + logger, + socketDialer, + ) + + listenerEndpoint = newSignerListenerEndpoint(logger, addr, testTimeoutReadWrite) + ) + + SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) + SignerDialerEndpointConnRetries(1e6)(dialerEndpoint) + + startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + + require.NoError(t, dialerEndpoint.Start()) + assert.True(t, dialerEndpoint.IsRunning()) + + <-endpointIsOpenCh + + return listenerEndpoint, dialerEndpoint +} diff --git a/privval/signer_remote.go b/privval/signer_remote.go deleted file mode 100644 index 730c2c8c1..000000000 --- a/privval/signer_remote.go +++ /dev/null @@ -1,203 +0,0 @@ -package privval - -import ( - "fmt" - "io" - "net" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/types" -) - -// SignerRemote implements PrivValidator. -// It uses a net.Conn to request signatures from an external process. -type SignerRemote struct { - conn net.Conn - - // memoized - address types.Address - consensusPubKey crypto.PubKey -} - -// Check that SignerRemote implements PrivValidator. -var _ types.PrivValidator = (*SignerRemote)(nil) - -// NewSignerRemote returns an instance of SignerRemote. -func NewSignerRemote(conn net.Conn) (*SignerRemote, error) { - - // retrieve and memoize the consensus public key once. - pubKey, err := getPubKey(conn) - if err != nil { - return nil, cmn.ErrorWrap(err, "error while retrieving public key for remote signer") - } - if pubKey == nil { - return &SignerRemote{ - conn: conn, - }, nil - } - return &SignerRemote{ - conn: conn, - consensusPubKey: pubKey, - address: pubKey.Address(), - }, nil -} - -// Close calls Close on the underlying net.Conn. -func (sc *SignerRemote) Close() error { - return sc.conn.Close() -} - -func (sc *SignerRemote) GetAddress() types.Address { - return sc.address -} - -// GetPubKey implements PrivValidator. -func (sc *SignerRemote) GetPubKey() crypto.PubKey { - return sc.consensusPubKey -} - -// not thread-safe (only called on startup). -func getPubKey(conn net.Conn) (crypto.PubKey, error) { - err := writeMsg(conn, &PubKeyRequest{}) - if err != nil { - return nil, err - } - - res, err := readMsg(conn) - if err != nil { - return nil, err - } - - pubKeyResp, ok := res.(*PubKeyResponse) - if !ok { - return nil, errors.Wrap(ErrUnexpectedResponse, "response is not PubKeyResponse") - } - - if pubKeyResp.Error != nil { - return nil, errors.Wrap(pubKeyResp.Error, "failed to get private validator's public key") - } - - return pubKeyResp.PubKey, nil -} - -// SignVote implements PrivValidator. 
-func (sc *SignerRemote) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - resp, ok := res.(*SignedVoteResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return resp.Error - } - *vote = *resp.Vote - - return nil -} - -// SignProposal implements PrivValidator. -func (sc *SignerRemote) SignProposal(chainID string, proposal *types.Proposal) error { - err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - resp, ok := res.(*SignedProposalResponse) - if !ok { - return ErrUnexpectedResponse - } - if resp.Error != nil { - return resp.Error - } - *proposal = *resp.Proposal - - return nil -} - -// Ping is used to check connection health. -func (sc *SignerRemote) Ping() error { - err := writeMsg(sc.conn, &PingRequest{}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - _, ok := res.(*PingResponse) - if !ok { - return ErrUnexpectedResponse - } - - return nil -} - -func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) { - const maxRemoteSignerMsgSize = 1024 * 10 - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(r, &msg, maxRemoteSignerMsgSize) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} - -func writeMsg(w io.Writer, msg interface{}) (err error) { - _, err = cdc.MarshalBinaryLengthPrefixedWriter(w, msg) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} - -func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValidator) (RemoteSignerMsg, error) { - var res RemoteSignerMsg - var err error - - switch r := req.(type) { - case *PubKeyRequest: - var p crypto.PubKey - p = privVal.GetPubKey() - res = &PubKeyResponse{p, nil} - - case *SignVoteRequest: - err = privVal.SignVote(chainID, r.Vote) - if err != nil { - res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedVoteResponse{r.Vote, nil} - } - - case *SignProposalRequest: - err = privVal.SignProposal(chainID, r.Proposal) - if err != nil { - res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedProposalResponse{r.Proposal, nil} - } - - case *PingRequest: - res = &PingResponse{} - - default: - err = fmt.Errorf("unknown msg: %v", r) - } - - return res, err -} diff --git a/privval/signer_remote_test.go b/privval/signer_remote_test.go deleted file mode 100644 index 28230b803..000000000 --- a/privval/signer_remote_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package privval - -import ( - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -// TestSignerRemoteRetryTCPOnly will test connection retry attempts over TCP. We -// don't need this for Unix sockets because the OS instantly knows the state of -// both ends of the socket connection. This basically causes the -// SignerServiceEndpoint.dialer() call inside SignerServiceEndpoint.connect() to return -// successfully immediately, putting an instant stop to any retry attempts. 
-func TestSignerRemoteRetryTCPOnly(t *testing.T) { - var ( - attemptCh = make(chan int) - retries = 2 - ) - - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - go func(ln net.Listener, attemptCh chan<- int) { - attempts := 0 - - for { - conn, err := ln.Accept() - require.NoError(t, err) - - err = conn.Close() - require.NoError(t, err) - - attempts++ - - if attempts == retries { - attemptCh <- attempts - break - } - } - }(ln, attemptCh) - - serviceEndpoint := NewSignerServiceEndpoint( - log.TestingLogger(), - cmn.RandStr(12), - types.NewMockPV(), - DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()), - ) - defer serviceEndpoint.Stop() - - SignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint) - SignerServiceEndpointConnRetries(retries)(serviceEndpoint) - - assert.Equal(t, serviceEndpoint.Start(), ErrDialRetryMax) - - select { - case attempts := <-attemptCh: - assert.Equal(t, retries, attempts) - case <-time.After(100 * time.Millisecond): - t.Error("expected remote to observe connection attempts") - } -} diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go new file mode 100644 index 000000000..dcab7752e --- /dev/null +++ b/privval/signer_requestHandler.go @@ -0,0 +1,44 @@ +package privval + +import ( + "fmt" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/types" +) + +func DefaultValidationRequestHandler(privVal types.PrivValidator, req SignerMessage, chainID string) (SignerMessage, error) { + var res SignerMessage + var err error + + switch r := req.(type) { + case *PubKeyRequest: + var p crypto.PubKey + p = privVal.GetPubKey() + res = &PubKeyResponse{p, nil} + + case *SignVoteRequest: + err = privVal.SignVote(chainID, r.Vote) + if err != nil { + res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedVoteResponse{r.Vote, nil} + } + + case *SignProposalRequest: + err = privVal.SignProposal(chainID, r.Proposal) + if err != nil { + res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedProposalResponse{r.Proposal, nil} + } + + case *PingRequest: + err, res = nil, &PingResponse{} + + default: + err = fmt.Errorf("unknown msg: %v", r) + } + + return res, err +} diff --git a/privval/signer_server.go b/privval/signer_server.go new file mode 100644 index 000000000..62dcc461c --- /dev/null +++ b/privval/signer_server.go @@ -0,0 +1,107 @@ +package privval + +import ( + "io" + "sync" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" +) + +// ValidationRequestHandlerFunc handles different remoteSigner requests +type ValidationRequestHandlerFunc func( + privVal types.PrivValidator, + requestMessage SignerMessage, + chainID string) (SignerMessage, error) + +type SignerServer struct { + cmn.BaseService + + endpoint *SignerDialerEndpoint + chainID string + privVal types.PrivValidator + + handlerMtx sync.Mutex + validationRequestHandler ValidationRequestHandlerFunc +} + +func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal types.PrivValidator) *SignerServer { + ss := &SignerServer{ + endpoint: endpoint, + chainID: chainID, + privVal: privVal, + validationRequestHandler: DefaultValidationRequestHandler, + } + + ss.BaseService = *cmn.NewBaseService(endpoint.Logger, "SignerServer", ss) + + return ss +} + +// OnStart implements cmn.Service. 
+func (ss *SignerServer) OnStart() error {
+	go ss.serviceLoop()
+	return nil
+}
+
+// OnStop implements cmn.Service.
+func (ss *SignerServer) OnStop() {
+	ss.endpoint.Logger.Debug("SignerServer: OnStop calling Close")
+	_ = ss.endpoint.Close()
+}
+
+// SetRequestHandler overrides the default function that is used to service requests
+func (ss *SignerServer) SetRequestHandler(validationRequestHandler ValidationRequestHandlerFunc) {
+	ss.handlerMtx.Lock()
+	defer ss.handlerMtx.Unlock()
+	ss.validationRequestHandler = validationRequestHandler
+}
+
+func (ss *SignerServer) servicePendingRequest() {
+	if !ss.IsRunning() {
+		return // Ignore error from closing.
+	}
+
+	req, err := ss.endpoint.ReadMessage()
+	if err != nil {
+		if err != io.EOF {
+			ss.Logger.Error("SignerServer: HandleMessage", "err", err)
+		}
+		return
+	}
+
+	var res SignerMessage
+	{
+		// hold the handler lock while servicing the request
+		// (note: the deferred unlock releases it when this function returns,
+		// not at the end of this block)
+		ss.handlerMtx.Lock()
+		defer ss.handlerMtx.Unlock()
+		res, err = ss.validationRequestHandler(ss.privVal, req, ss.chainID)
+		if err != nil {
+			// only log the error; we'll reply with an error in res
+			ss.Logger.Error("SignerServer: handleMessage", "err", err)
+		}
+	}
+
+	if res != nil {
+		err = ss.endpoint.WriteMessage(res)
+		if err != nil {
+			ss.Logger.Error("SignerServer: writeMessage", "err", err)
+		}
+	}
+}
+
+func (ss *SignerServer) serviceLoop() {
+	for {
+		select {
+		default:
+			err := ss.endpoint.ensureConnection()
+			if err != nil {
+				return
+			}
+			ss.servicePendingRequest()
+
+		case <-ss.Quit():
+			return
+		}
+	}
+}
diff --git a/privval/signer_service_endpoint.go b/privval/signer_service_endpoint.go
deleted file mode 100644
index 1b37d5fc6..000000000
--- a/privval/signer_service_endpoint.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package privval
-
-import (
-	"io"
-	"net"
-	"time"
-
-	cmn "github.com/tendermint/tendermint/libs/common"
-	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/types"
-)
-
-// SignerServiceEndpointOption sets an optional parameter on the SignerServiceEndpoint.
-type SignerServiceEndpointOption func(*SignerServiceEndpoint)
-
-// SignerServiceEndpointTimeoutReadWrite sets the read and write timeout for connections
-// from external signing processes.
-func SignerServiceEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption {
-	return func(ss *SignerServiceEndpoint) { ss.timeoutReadWrite = timeout }
-}
-
-// SignerServiceEndpointConnRetries sets the amount of attempted retries to connect.
-func SignerServiceEndpointConnRetries(retries int) SignerServiceEndpointOption {
-	return func(ss *SignerServiceEndpoint) { ss.connRetries = retries }
-}
-
-// SignerServiceEndpoint dials using its dialer and responds to any
-// signature requests using its privVal.
-type SignerServiceEndpoint struct {
-	cmn.BaseService
-
-	chainID          string
-	timeoutReadWrite time.Duration
-	connRetries      int
-	privVal          types.PrivValidator
-
-	dialer SocketDialer
-	conn   net.Conn
-}
-
-// NewSignerServiceEndpoint returns a SignerServiceEndpoint that will dial using the given
-// dialer and respond to any signature requests over the connection
-// using the given privVal.
-func NewSignerServiceEndpoint( - logger log.Logger, - chainID string, - privVal types.PrivValidator, - dialer SocketDialer, -) *SignerServiceEndpoint { - se := &SignerServiceEndpoint{ - chainID: chainID, - timeoutReadWrite: time.Second * defaultTimeoutReadWriteSeconds, - connRetries: defaultMaxDialRetries, - privVal: privVal, - dialer: dialer, - } - - se.BaseService = *cmn.NewBaseService(logger, "SignerServiceEndpoint", se) - return se -} - -// OnStart implements cmn.Service. -func (se *SignerServiceEndpoint) OnStart() error { - conn, err := se.connect() - if err != nil { - se.Logger.Error("OnStart", "err", err) - return err - } - - se.conn = conn - go se.handleConnection(conn) - - return nil -} - -// OnStop implements cmn.Service. -func (se *SignerServiceEndpoint) OnStop() { - if se.conn == nil { - return - } - - if err := se.conn.Close(); err != nil { - se.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) - } -} - -func (se *SignerServiceEndpoint) connect() (net.Conn, error) { - for retries := 0; retries < se.connRetries; retries++ { - // Don't sleep if it is the first retry. - if retries > 0 { - time.Sleep(se.timeoutReadWrite) - } - - conn, err := se.dialer() - if err == nil { - return conn, nil - } - - se.Logger.Error("dialing", "err", err) - } - - return nil, ErrDialRetryMax -} - -func (se *SignerServiceEndpoint) handleConnection(conn net.Conn) { - for { - if !se.IsRunning() { - return // Ignore error from listener closing. - } - - // Reset the connection deadline - deadline := time.Now().Add(se.timeoutReadWrite) - err := conn.SetDeadline(deadline) - if err != nil { - return - } - - req, err := readMsg(conn) - if err != nil { - if err != io.EOF { - se.Logger.Error("handleConnection readMsg", "err", err) - } - return - } - - res, err := handleRequest(req, se.chainID, se.privVal) - - if err != nil { - // only log the error; we'll reply with an error in res - se.Logger.Error("handleConnection handleRequest", "err", err) - } - - err = writeMsg(conn, res) - if err != nil { - se.Logger.Error("handleConnection writeMsg", "err", err) - return - } - } -} diff --git a/privval/signer_validator_endpoint.go b/privval/signer_validator_endpoint.go deleted file mode 100644 index 840364e90..000000000 --- a/privval/signer_validator_endpoint.go +++ /dev/null @@ -1,237 +0,0 @@ -package privval - -import ( - "fmt" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -const ( - defaultHeartbeatSeconds = 2 - defaultMaxDialRetries = 10 -) - -var ( - heartbeatPeriod = time.Second * defaultHeartbeatSeconds -) - -// SignerValidatorEndpointOption sets an optional parameter on the SocketVal. -type SignerValidatorEndpointOption func(*SignerValidatorEndpoint) - -// SignerValidatorEndpointSetHeartbeat sets the period on which to check the liveness of the -// connected Signer connections. -func SignerValidatorEndpointSetHeartbeat(period time.Duration) SignerValidatorEndpointOption { - return func(sc *SignerValidatorEndpoint) { sc.heartbeatPeriod = period } -} - -// SocketVal implements PrivValidator. -// It listens for an external process to dial in and uses -// the socket to request signatures. 
-type SignerValidatorEndpoint struct { - cmn.BaseService - - listener net.Listener - - // ping - cancelPingCh chan struct{} - pingTicker *time.Ticker - heartbeatPeriod time.Duration - - // signer is mutable since it can be reset if the connection fails. - // failures are detected by a background ping routine. - // All messages are request/response, so we hold the mutex - // so only one request/response pair can happen at a time. - // Methods on the underlying net.Conn itself are already goroutine safe. - mtx sync.Mutex - - // TODO: Signer should encapsulate and hide the endpoint completely. Invert the relation - signer *SignerRemote -} - -// Check that SignerValidatorEndpoint implements PrivValidator. -var _ types.PrivValidator = (*SignerValidatorEndpoint)(nil) - -// NewSignerValidatorEndpoint returns an instance of SignerValidatorEndpoint. -func NewSignerValidatorEndpoint(logger log.Logger, listener net.Listener) *SignerValidatorEndpoint { - sc := &SignerValidatorEndpoint{ - listener: listener, - heartbeatPeriod: heartbeatPeriod, - } - - sc.BaseService = *cmn.NewBaseService(logger, "SignerValidatorEndpoint", sc) - - return sc -} - -//-------------------------------------------------------- -// Implement PrivValidator - -// GetAddress implements PrivValidator. -func (ve *SignerValidatorEndpoint) GetAddress() types.Address { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.GetAddress() -} - -// GetPubKey implements PrivValidator. -func (ve *SignerValidatorEndpoint) GetPubKey() crypto.PubKey { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.GetPubKey() -} - -// SignVote implements PrivValidator. -func (ve *SignerValidatorEndpoint) SignVote(chainID string, vote *types.Vote) error { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.SignVote(chainID, vote) -} - -// SignProposal implements PrivValidator. -func (ve *SignerValidatorEndpoint) SignProposal(chainID string, proposal *types.Proposal) error { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.SignProposal(chainID, proposal) -} - -//-------------------------------------------------------- -// More thread safe methods proxied to the signer - -// Ping is used to check connection health. -func (ve *SignerValidatorEndpoint) Ping() error { - ve.mtx.Lock() - defer ve.mtx.Unlock() - return ve.signer.Ping() -} - -// Close closes the underlying net.Conn. -func (ve *SignerValidatorEndpoint) Close() { - ve.mtx.Lock() - defer ve.mtx.Unlock() - if ve.signer != nil { - if err := ve.signer.Close(); err != nil { - ve.Logger.Error("OnStop", "err", err) - } - } - - if ve.listener != nil { - if err := ve.listener.Close(); err != nil { - ve.Logger.Error("OnStop", "err", err) - } - } -} - -//-------------------------------------------------------- -// Service start and stop - -// OnStart implements cmn.Service. 
-func (ve *SignerValidatorEndpoint) OnStart() error { - if closed, err := ve.reset(); err != nil { - ve.Logger.Error("OnStart", "err", err) - return err - } else if closed { - return fmt.Errorf("listener is closed") - } - - // Start a routine to keep the connection alive - ve.cancelPingCh = make(chan struct{}, 1) - ve.pingTicker = time.NewTicker(ve.heartbeatPeriod) - go func() { - for { - select { - case <-ve.pingTicker.C: - err := ve.Ping() - if err != nil { - ve.Logger.Error("Ping", "err", err) - if err == ErrUnexpectedResponse { - return - } - - closed, err := ve.reset() - if err != nil { - ve.Logger.Error("Reconnecting to remote signer failed", "err", err) - continue - } - if closed { - ve.Logger.Info("listener is closing") - return - } - - ve.Logger.Info("Re-created connection to remote signer", "impl", ve) - } - case <-ve.cancelPingCh: - ve.pingTicker.Stop() - return - } - } - }() - - return nil -} - -// OnStop implements cmn.Service. -func (ve *SignerValidatorEndpoint) OnStop() { - if ve.cancelPingCh != nil { - close(ve.cancelPingCh) - } - ve.Close() -} - -//-------------------------------------------------------- -// Connection and signer management - -// waits to accept and sets a new connection. -// connection is closed in OnStop. -// returns true if the listener is closed -// (ie. it returns a nil conn). -func (ve *SignerValidatorEndpoint) reset() (closed bool, err error) { - ve.mtx.Lock() - defer ve.mtx.Unlock() - - // first check if the conn already exists and close it. - if ve.signer != nil { - if tmpErr := ve.signer.Close(); tmpErr != nil { - ve.Logger.Error("error closing socket val connection during reset", "err", tmpErr) - } - } - - // wait for a new conn - conn, err := ve.acceptConnection() - if err != nil { - return false, err - } - - // listener is closed - if conn == nil { - return true, nil - } - - ve.signer, err = NewSignerRemote(conn) - if err != nil { - // failed to fetch the pubkey. close out the connection. - if tmpErr := conn.Close(); tmpErr != nil { - ve.Logger.Error("error closing connection", "err", tmpErr) - } - return false, err - } - return false, nil -} - -// Attempt to accept a connection. -// Times out after the listener's timeoutAccept -func (ve *SignerValidatorEndpoint) acceptConnection() (net.Conn, error) { - conn, err := ve.listener.Accept() - if err != nil { - if !ve.IsRunning() { - return nil, nil // Ignore error from listener closing. 
- } - return nil, err - } - return conn, nil -} diff --git a/privval/signer_validator_endpoint_test.go b/privval/signer_validator_endpoint_test.go deleted file mode 100644 index bf4c29930..000000000 --- a/privval/signer_validator_endpoint_test.go +++ /dev/null @@ -1,505 +0,0 @@ -package privval - -import ( - "fmt" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - - "github.com/tendermint/tendermint/types" -) - -var ( - testTimeoutAccept = defaultTimeoutAcceptSeconds * time.Second - - testTimeoutReadWrite = 100 * time.Millisecond - testTimeoutReadWrite2o3 = 66 * time.Millisecond // 2/3 of the other one - - testTimeoutHeartbeat = 10 * time.Millisecond - testTimeoutHeartbeat3o2 = 6 * time.Millisecond // 3/2 of the other one -) - -type socketTestCase struct { - addr string - dialer SocketDialer -} - -func socketTestCases(t *testing.T) []socketTestCase { - tcpAddr := fmt.Sprintf("tcp://%s", testFreeTCPAddr(t)) - unixFilePath, err := testUnixAddr() - require.NoError(t, err) - unixAddr := fmt.Sprintf("unix://%s", unixFilePath) - return []socketTestCase{ - { - addr: tcpAddr, - dialer: DialTCPFn(tcpAddr, testTimeoutReadWrite, ed25519.GenPrivKey()), - }, - { - addr: unixAddr, - dialer: DialUnixFn(unixFilePath), - }, - } -} - -func TestSocketPVAddress(t *testing.T) { - for _, tc := range socketTestCases(t) { - // Execute the test within a closure to ensure the deferred statements - // are called between each for loop iteration, for isolated test cases. - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer) - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - serviceAddr := serviceEndpoint.privVal.GetPubKey().Address() - validatorAddr := validatorEndpoint.GetPubKey().Address() - - assert.Equal(t, serviceAddr, validatorAddr) - }() - } -} - -func TestSocketPVPubKey(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - clientKey := validatorEndpoint.GetPubKey() - privvalPubKey := serviceEndpoint.privVal.GetPubKey() - - assert.Equal(t, privvalPubKey, clientKey) - }() - } -} - -func TestSocketPVProposal(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - privProposal = &types.Proposal{Timestamp: ts} - clientProposal = &types.Proposal{Timestamp: ts} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - require.NoError(t, serviceEndpoint.privVal.SignProposal(chainID, privProposal)) - require.NoError(t, validatorEndpoint.SignProposal(chainID, clientProposal)) - - assert.Equal(t, privProposal.Signature, clientProposal.Signature) - }() - } -} - -func TestSocketPVVote(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = 
types.PrecommitType - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - }() - } -} - -func TestSocketPVVoteResetDeadline(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = types.PrecommitType - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - time.Sleep(testTimeoutReadWrite2o3) - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - - // This would exceed the deadline if it was not extended by the previous message - time.Sleep(testTimeoutReadWrite2o3) - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - }() - } -} - -func TestSocketPVVoteKeepalive(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = types.PrecommitType - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - time.Sleep(testTimeoutReadWrite * 2) - - require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want)) - require.NoError(t, validatorEndpoint.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) - }() - } -} - -func TestSocketPVDeadline(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - listenc = make(chan struct{}) - thisConnTimeout = 100 * time.Millisecond - validatorEndpoint = newSignerValidatorEndpoint(log.TestingLogger(), tc.addr, thisConnTimeout) - ) - - go func(sc *SignerValidatorEndpoint) { - defer close(listenc) - - // Note: the TCP connection times out at the accept() phase, - // whereas the Unix domain sockets connection times out while - // attempting to fetch the remote signer's public key. 
- assert.True(t, IsConnTimeout(sc.Start())) - - assert.False(t, sc.IsRunning()) - }(validatorEndpoint) - - for { - _, err := cmn.Connect(tc.addr) - if err == nil { - break - } - } - - <-listenc - }() - } -} - -func TestRemoteSignVoteErrors(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewErroringMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - vType = types.PrecommitType - vote = &types.Vote{Timestamp: ts, Type: vType} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - err := validatorEndpoint.SignVote("", vote) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = serviceEndpoint.privVal.SignVote(chainID, vote) - require.Error(t, err) - err = validatorEndpoint.SignVote(chainID, vote) - require.Error(t, err) - }() - } -} - -func TestRemoteSignProposalErrors(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - chainID = cmn.RandStr(12) - validatorEndpoint, serviceEndpoint = testSetupSocketPair( - t, - chainID, - types.NewErroringMockPV(), - tc.addr, - tc.dialer) - - ts = time.Now() - proposal = &types.Proposal{Timestamp: ts} - ) - defer validatorEndpoint.Stop() - defer serviceEndpoint.Stop() - - err := validatorEndpoint.SignProposal("", proposal) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = serviceEndpoint.privVal.SignProposal(chainID, proposal) - require.Error(t, err) - - err = validatorEndpoint.SignProposal(chainID, proposal) - require.Error(t, err) - }() - } -} - -func TestErrUnexpectedResponse(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - logger = log.TestingLogger() - chainID = cmn.RandStr(12) - readyCh = make(chan struct{}) - errCh = make(chan error, 1) - - serviceEndpoint = NewSignerServiceEndpoint( - logger, - chainID, - types.NewMockPV(), - tc.dialer, - ) - - validatorEndpoint = newSignerValidatorEndpoint( - logger, - tc.addr, - testTimeoutReadWrite) - ) - - testStartEndpoint(t, readyCh, validatorEndpoint) - defer validatorEndpoint.Stop() - SignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint) - SignerServiceEndpointConnRetries(100)(serviceEndpoint) - // we do not want to Start() the remote signer here and instead use the connection to - // reply with intentionally wrong replies below: - rsConn, err := serviceEndpoint.connect() - defer rsConn.Close() - require.NoError(t, err) - require.NotNil(t, rsConn) - // send over public key to get the remote signer running: - go testReadWriteResponse(t, &PubKeyResponse{}, rsConn) - <-readyCh - - // Proposal: - go func(errc chan error) { - errc <- validatorEndpoint.SignProposal(chainID, &types.Proposal{}) - }(errCh) - - // read request and write wrong response: - go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) - err = <-errCh - require.Error(t, err) - require.Equal(t, err, ErrUnexpectedResponse) - - // Vote: - go func(errc chan error) { - errc <- validatorEndpoint.SignVote(chainID, &types.Vote{}) - }(errCh) - // read request and write wrong response: - go testReadWriteResponse(t, &SignedProposalResponse{}, rsConn) - err = <-errCh - require.Error(t, err) - require.Equal(t, err, ErrUnexpectedResponse) - }() - } -} - -func TestRetryConnToRemoteSigner(t *testing.T) { - for _, tc := range socketTestCases(t) { - func() { - var ( - logger = log.TestingLogger() - chainID = cmn.RandStr(12) - readyCh 
= make(chan struct{}) - - serviceEndpoint = NewSignerServiceEndpoint( - logger, - chainID, - types.NewMockPV(), - tc.dialer, - ) - thisConnTimeout = testTimeoutReadWrite - validatorEndpoint = newSignerValidatorEndpoint(logger, tc.addr, thisConnTimeout) - ) - // Ping every: - SignerValidatorEndpointSetHeartbeat(testTimeoutHeartbeat)(validatorEndpoint) - - SignerServiceEndpointTimeoutReadWrite(testTimeoutReadWrite)(serviceEndpoint) - SignerServiceEndpointConnRetries(10)(serviceEndpoint) - - testStartEndpoint(t, readyCh, validatorEndpoint) - defer validatorEndpoint.Stop() - require.NoError(t, serviceEndpoint.Start()) - assert.True(t, serviceEndpoint.IsRunning()) - - <-readyCh - time.Sleep(testTimeoutHeartbeat * 2) - - serviceEndpoint.Stop() - rs2 := NewSignerServiceEndpoint( - logger, - chainID, - types.NewMockPV(), - tc.dialer, - ) - // let some pings pass - time.Sleep(testTimeoutHeartbeat3o2) - require.NoError(t, rs2.Start()) - assert.True(t, rs2.IsRunning()) - defer rs2.Stop() - - // give the client some time to re-establish the conn to the remote signer - // should see sth like this in the logs: - // - // E[10016-01-10|17:12:46.128] Ping err="remote signer timed out" - // I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal - time.Sleep(testTimeoutReadWrite * 2) - }() - } -} - -func newSignerValidatorEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerValidatorEndpoint { - proto, address := cmn.ProtocolAndAddress(addr) - - ln, err := net.Listen(proto, address) - logger.Info("Listening at", "proto", proto, "address", address) - if err != nil { - panic(err) - } - - var listener net.Listener - - if proto == "unix" { - unixLn := NewUnixListener(ln) - UnixListenerTimeoutAccept(testTimeoutAccept)(unixLn) - UnixListenerTimeoutReadWrite(timeoutReadWrite)(unixLn) - listener = unixLn - } else { - tcpLn := NewTCPListener(ln, ed25519.GenPrivKey()) - TCPListenerTimeoutAccept(testTimeoutAccept)(tcpLn) - TCPListenerTimeoutReadWrite(timeoutReadWrite)(tcpLn) - listener = tcpLn - } - - return NewSignerValidatorEndpoint(logger, listener) -} - -func testSetupSocketPair( - t *testing.T, - chainID string, - privValidator types.PrivValidator, - addr string, - socketDialer SocketDialer, -) (*SignerValidatorEndpoint, *SignerServiceEndpoint) { - var ( - logger = log.TestingLogger() - privVal = privValidator - readyc = make(chan struct{}) - serviceEndpoint = NewSignerServiceEndpoint( - logger, - chainID, - privVal, - socketDialer, - ) - - thisConnTimeout = testTimeoutReadWrite - validatorEndpoint = newSignerValidatorEndpoint(logger, addr, thisConnTimeout) - ) - - SignerValidatorEndpointSetHeartbeat(testTimeoutHeartbeat)(validatorEndpoint) - SignerServiceEndpointTimeoutReadWrite(testTimeoutReadWrite)(serviceEndpoint) - SignerServiceEndpointConnRetries(1e6)(serviceEndpoint) - - testStartEndpoint(t, readyc, validatorEndpoint) - - require.NoError(t, serviceEndpoint.Start()) - assert.True(t, serviceEndpoint.IsRunning()) - - <-readyc - - return validatorEndpoint, serviceEndpoint -} - -func testReadWriteResponse(t *testing.T, resp RemoteSignerMsg, rsConn net.Conn) { - _, err := readMsg(rsConn) - require.NoError(t, err) - - err = writeMsg(rsConn, resp) - require.NoError(t, err) -} - -func testStartEndpoint(t *testing.T, readyCh chan struct{}, sc *SignerValidatorEndpoint) { - go func(sc *SignerValidatorEndpoint) { - require.NoError(t, sc.Start()) - assert.True(t, sc.IsRunning()) - - readyCh <- struct{}{} - }(sc) -} - -// testFreeTCPAddr claims a free port so we don't 
block on listener being ready. -func testFreeTCPAddr(t *testing.T) string { - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer ln.Close() - - return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) -} diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index 9d5d5cc2b..c77261bc5 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -1,26 +1,49 @@ package privval import ( + "fmt" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" ) +func getDialerTestCases(t *testing.T) []dialerTestCase { + tcpAddr := GetFreeLocalhostAddrPort() + unixFilePath, err := testUnixAddr() + require.NoError(t, err) + unixAddr := fmt.Sprintf("unix://%s", unixFilePath) + + return []dialerTestCase{ + { + addr: tcpAddr, + dialer: DialTCPFn(tcpAddr, testTimeoutReadWrite, ed25519.GenPrivKey()), + }, + { + addr: unixAddr, + dialer: DialUnixFn(unixFilePath), + }, + } +} + func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) { // Generate a networking timeout - dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey()) + tcpAddr := GetFreeLocalhostAddrPort() + dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) assert.True(t, IsConnTimeout(err)) } func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) { - dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey()) + tcpAddr := GetFreeLocalhostAddrPort() + dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + err = cmn.ErrorWrap(ErrConnectionTimeout, err.Error()) assert.True(t, IsConnTimeout(err)) } diff --git a/privval/socket_listeners.go b/privval/socket_listeners.go index 7c8835791..f4d875e71 100644 --- a/privval/socket_listeners.go +++ b/privval/socket_listeners.go @@ -9,8 +9,8 @@ import ( ) const ( - defaultTimeoutAcceptSeconds = 3 - defaultTimeoutReadWriteSeconds = 3 + defaultTimeoutAcceptSeconds = 3 + defaultPingPeriodMilliseconds = 100 ) // timeoutError can be used to check if an error returned from the netp package diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 498ef79c0..3c4cb8588 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -97,7 +97,15 @@ func TestListenerTimeoutAccept(t *testing.T) { } func TestListenerTimeoutReadWrite(t *testing.T) { - for _, tc := range listenerTestCases(t, time.Second, time.Millisecond) { + const ( + // This needs to be long enough s.t. the Accept will definitely succeed: + timeoutAccept = time.Second + // This can be really short but in the TCP case, the accept can + // also trigger a timeoutReadWrite. Hence, we need to give it some time. + // Note: this controls how long this test actually runs. 
+ timeoutReadWrite = 10 * time.Millisecond + ) + for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) { go func(dialer SocketDialer) { _, err := dialer() if err != nil { @@ -110,8 +118,7 @@ func TestListenerTimeoutReadWrite(t *testing.T) { t.Fatal(err) } - time.Sleep(2 * time.Millisecond) - + // this will timeout because we don't write anything: msg := make([]byte, 200) _, err = c.Read(msg) opErr, ok := err.(*net.OpError) @@ -122,5 +129,9 @@ func TestListenerTimeoutReadWrite(t *testing.T) { if have, want := opErr.Op, "read"; have != want { t.Errorf("for %s listener, have %v, want %v", tc.description, have, want) } + + if !opErr.Timeout() { + t.Errorf("for %s listener, got unexpected error: have %v, want Timeout error", tc.description, opErr) + } } } diff --git a/privval/utils.go b/privval/utils.go index d8837bdf0..a707e2ee4 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -1,7 +1,12 @@ package privval import ( + "fmt" + "net" + + "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" ) // IsConnTimeout returns a boolean indicating whether the error is known to @@ -9,7 +14,7 @@ import ( // network timeouts, as well as ErrConnTimeout errors. func IsConnTimeout(err error) bool { if cmnErr, ok := err.(cmn.Error); ok { - if cmnErr.Data() == ErrConnTimeout { + if cmnErr.Data() == ErrConnectionTimeout { return true } } @@ -18,3 +23,39 @@ func IsConnTimeout(err error) bool { } return false } + +// NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address +func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEndpoint, error) { + var listener net.Listener + + protocol, address := cmn.ProtocolAndAddress(listenAddr) + ln, err := net.Listen(protocol, address) + if err != nil { + return nil, err + } + switch protocol { + case "unix": + listener = NewUnixListener(ln) + case "tcp": + // TODO: persist this key so external signer can actually authenticate us + listener = NewTCPListener(ln, ed25519.GenPrivKey()) + default: + return nil, fmt.Errorf( + "wrong listen address: expected either 'tcp' or 'unix' protocols, got %s", + protocol, + ) + } + + pve := NewSignerListenerEndpoint(logger.With("module", "privval"), listener) + + return pve, nil +} + +// GetFreeLocalhostAddrPort returns a free localhost:port address +func GetFreeLocalhostAddrPort() string { + port, err := cmn.GetFreePort() + if err != nil { + panic(err) + } + return fmt.Sprintf("127.0.0.1:%d", port) +} diff --git a/privval/utils_test.go b/privval/utils_test.go index 23f6f6a3b..b07186f6c 100644 --- a/privval/utils_test.go +++ b/privval/utils_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" ) diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 61ad48361..47344b89c 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -15,7 +15,7 @@ type AppConnConsensus interface { InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(tx []byte) *abcicli.ReqRes + DeliverTxAsync(types.RequestDeliverTx) *abcicli.ReqRes EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) CommitSync() (*types.ResponseCommit, error) } @@ -24,8 +24,8 @@ type AppConnMempool interface { SetResponseCallback(abcicli.Callback) Error() error - CheckTxAsync(tx []byte) *abcicli.ReqRes - 
ReCheckTxAsync(tx []byte) *abcicli.ReqRes + ReCheckTxAsync(types.RequestCheckTx) *abcicli.ReqRes + CheckTxAsync(types.RequestCheckTx) *abcicli.ReqRes FlushAsync() *abcicli.ReqRes FlushSync() error @@ -107,8 +107,8 @@ func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (*types return app.appConn.BeginBlockSync(req) } -func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes { - return app.appConn.DeliverTxAsync(tx) +func (app *appConnConsensus) DeliverTxAsync(req types.RequestDeliverTx) *abcicli.ReqRes { + return app.appConn.DeliverTxAsync(req) } func (app *appConnConsensus) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { @@ -148,12 +148,12 @@ func (app *appConnMempool) FlushSync() error { return app.appConn.FlushSync() } -func (app *appConnMempool) CheckTxAsync(tx []byte) *abcicli.ReqRes { - return app.appConn.CheckTxAsync(tx) +func (app *appConnMempool) CheckTxAsync(req types.RequestCheckTx) *abcicli.ReqRes { + return app.appConn.CheckTxAsync(req) } -func (app *appConnMempool) ReCheckTxAsync(tx []byte) *abcicli.ReqRes { - return app.appConn.ReCheckTxAsync(tx) +func (app *appConnMempool) ReCheckTxAsync(req types.RequestCheckTx) *abcicli.ReqRes { + return app.appConn.ReCheckTxAsync(req) } //------------------------------------------------ diff --git a/rpc/client/codec.go b/rpc/client/codec.go new file mode 100644 index 000000000..ef1a00ec4 --- /dev/null +++ b/rpc/client/codec.go @@ -0,0 +1,12 @@ +package client + +import ( + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" +) + +var cdc = amino.NewCodec() + +func init() { + types.RegisterEvidences(cdc) +} diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go new file mode 100644 index 000000000..720e48492 --- /dev/null +++ b/rpc/client/examples_test.go @@ -0,0 +1,126 @@ +package client_test + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctest "github.com/tendermint/tendermint/rpc/test" +) + +func ExampleHTTP_simple() { + // Start a tendermint node (and kvstore) in the background to test against + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig) + defer rpctest.StopTendermint(node) + + // Create our RPC client + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + c := client.NewHTTP(rpcAddr, "/websocket") + + // Create a transaction + k := []byte("name") + v := []byte("satoshi") + tx := append(k, append([]byte("="), v...)...) 
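For context on the transaction built just above: the kvstore example app parses the raw tx bytes as a key=value pair, so "name=satoshi" stores value "satoshi" under key "name", which the "/key" query path later reads back. A hedged sketch of a helper in the spirit of the MakeTxKV used by the tests later in this diff (the name and the random sizing here are illustrative assumptions; only the k=v convention comes from the source):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// makeTxKV builds a kvstore-style transaction from a fresh random
// key/value pair; only the k=v wire format is taken from the source.
func makeTxKV() (key, value, tx []byte) {
	k := make([]byte, 8)
	v := make([]byte, 8)
	_, _ = rand.Read(k)
	_, _ = rand.Read(v)
	key = []byte(hex.EncodeToString(k))
	value = []byte(hex.EncodeToString(v))
	tx = append(key, append([]byte("="), value...)...)
	return key, value, tx
}

func main() {
	k, v, tx := makeTxKV()
	fmt.Printf("key=%s value=%s tx=%s\n", k, v, tx)
}
```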
+ + // Broadcast the transaction and wait for it to commit (rather use + // c.BroadcastTxSync though in production) + bres, err := c.BroadcastTxCommit(tx) + if err != nil { + panic(err) + } + if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { + panic("BroadcastTxCommit transaction failed") + } + + // Now try to fetch the value for the key + qres, err := c.ABCIQuery("/key", k) + if err != nil { + panic(err) + } + if qres.Response.IsErr() { + panic("ABCIQuery failed") + } + if !bytes.Equal(qres.Response.Key, k) { + panic("returned key does not match queried key") + } + if !bytes.Equal(qres.Response.Value, v) { + panic("returned value does not match sent value") + } + + fmt.Println("Sent tx :", string(tx)) + fmt.Println("Queried for :", string(qres.Response.Key)) + fmt.Println("Got value :", string(qres.Response.Value)) + + // Output: + // Sent tx : name=satoshi + // Queried for : name + // Got value : satoshi +} + +func ExampleHTTP_batching() { + // Start a tendermint node (and kvstore) in the background to test against + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig) + defer rpctest.StopTendermint(node) + + // Create our RPC client + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + c := client.NewHTTP(rpcAddr, "/websocket") + + // Create our two transactions + k1 := []byte("firstName") + v1 := []byte("satoshi") + tx1 := append(k1, append([]byte("="), v1...)...) + + k2 := []byte("lastName") + v2 := []byte("nakamoto") + tx2 := append(k2, append([]byte("="), v2...)...) + + txs := [][]byte{tx1, tx2} + + // Create a new batch + batch := c.NewBatch() + + // Queue up our transactions + for _, tx := range txs { + if _, err := batch.BroadcastTxCommit(tx); err != nil { + panic(err) + } + } + + // Send the batch of 2 transactions + if _, err := batch.Send(); err != nil { + panic(err) + } + + // Now let's query for the original results as a batch + keys := [][]byte{k1, k2} + for _, key := range keys { + if _, err := batch.ABCIQuery("/key", key); err != nil { + panic(err) + } + } + + // Send the 2 queries and keep the results + results, err := batch.Send() + if err != nil { + panic(err) + } + + // Each result in the returned list is the deserialized result of each + // respective ABCIQuery response + for _, result := range results { + qr, ok := result.(*ctypes.ResultABCIQuery) + if !ok { + panic("invalid result type from ABCIQuery request") + } + fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) + } + + // Output: + // firstName = satoshi + // lastName = nakamoto +} diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 4889b0740..756ba2818 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -15,7 +15,7 @@ type Waiter func(delta int64) (abort error) // but you can plug in another one func DefaultWaitStrategy(delta int64) (abort error) { if delta > 10 { - return errors.Errorf("Waiting for %d blocks... aborting", delta) + return errors.Errorf("waiting for %d blocks... aborting", delta) } else if delta > 0 { // estimate of wait time.... // wait half a second for the next block (in progress) diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index f5a01e8c4..671d2425d 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -18,27 +18,72 @@ import ( ) /* -HTTP is a Client implementation that communicates with a tendermint node over -json rpc and websockets. 
+HTTP is a Client implementation that communicates with a Tendermint node over
+JSON RPC and WebSockets.
 
 This is the main implementation you probably want to use in production code.
-There are other implementations when calling the tendermint node in-process
+There are other implementations when calling the Tendermint node in-process
 (Local), or when you want to mock out the server for test code (mock).
 
 You can subscribe for any event published by Tendermint using Subscribe method.
-Note delivery is best-effort. If you don't read events fast enough or network
-is slow, Tendermint might cancel the subscription. The client will attempt to
+Note delivery is best-effort. If you don't read events fast enough or network is
+slow, Tendermint might cancel the subscription. The client will attempt to
 resubscribe (you don't need to do anything). It will keep trying every second
 indefinitely until successful.
+
+Request batching is available for JSON RPC requests over HTTP, which conforms to
+the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See
+the example for more details.
 */
 type HTTP struct {
 	remote string
 	rpc    *rpcclient.JSONRPCClient
+
+	*baseRPCClient
 	*WSEvents
 }
 
-// NewHTTP takes a remote endpoint in the form tcp://<host>:<port>
-// and the websocket path (which always seems to be "/websocket")
+// BatchHTTP provides the same interface as `HTTP`, but allows for batching of
+// requests (as per https://www.jsonrpc.org/specification#batch). Do not
+// instantiate directly - rather use the HTTP.NewBatch() method to create an
+// instance of this struct.
+//
+// Batching of HTTP requests is thread-safe in the sense that multiple
+// goroutines can each create their own batches and send them using the same
+// HTTP client. Multiple goroutines could also enqueue transactions in a single
+// batch, but ordering of transactions in the batch cannot be guaranteed in such
+// a case.
+type BatchHTTP struct {
+	rpcBatch *rpcclient.JSONRPCRequestBatch
+	*baseRPCClient
+}
+
+// rpcClient is an internal interface to which our RPC clients (batch and
+// non-batch) must conform. Acts as an additional code-level sanity check to
+// make sure the implementations stay coherent.
+type rpcClient interface {
+	ABCIClient
+	HistoryClient
+	NetworkClient
+	SignClient
+	StatusClient
+}
+
+// baseRPCClient implements the basic RPC method logic without the actual
+// underlying RPC call functionality, which is provided by `caller`.
+type baseRPCClient struct {
+	caller rpcclient.JSONRPCCaller
+}
+
+var _ rpcClient = (*HTTP)(nil)
+var _ rpcClient = (*BatchHTTP)(nil)
+var _ rpcClient = (*baseRPCClient)(nil)
+
+//-----------------------------------------------------------------------------
+// HTTP
+
+// NewHTTP takes a remote endpoint in the form <protocol>://<host>:<port> and
+// the websocket path (which always seems to be "/websocket")
 func NewHTTP(remote, wsEndpoint string) *HTTP {
 	rc := rpcclient.NewJSONRPCClient(remote)
 	cdc := rc.Codec()
@@ -46,39 +91,76 @@ func NewHTTP(remote, wsEndpoint string) *HTTP {
 	rc.SetCodec(cdc)
 
 	return &HTTP{
-		rpc:      rc,
-		remote:   remote,
-		WSEvents: newWSEvents(cdc, remote, wsEndpoint),
+		rpc:           rc,
+		remote:        remote,
+		baseRPCClient: &baseRPCClient{caller: rc},
+		WSEvents:      newWSEvents(cdc, remote, wsEndpoint),
 	}
 }
 
 var _ Client = (*HTTP)(nil)
 
-func (c *HTTP) Status() (*ctypes.ResultStatus, error) {
+// NewBatch creates a new batch client for this HTTP client.
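The batching support introduced above rests on one indirection: both HTTP and BatchHTTP embed baseRPCClient, which only knows how to talk to an abstract caller (rpcclient.JSONRPCCaller). Every RPC method is therefore written once and behaves immediately or deferred depending on the caller behind it. A stripped-down sketch of the pattern (names loosely mirror the diff; the bodies are illustrative, not the real implementation):

```go
package main

import "fmt"

// caller abstracts "perform a JSON RPC call"; the real code uses
// rpcclient.JSONRPCCaller, implemented by both the plain client and a
// request batch.
type caller interface {
	Call(method string, params map[string]interface{}, result interface{}) (interface{}, error)
}

type baseClient struct{ c caller }

// Every RPC method is written once against the caller...
func (b *baseClient) Status() error {
	_, err := b.c.Call("status", map[string]interface{}{}, nil)
	return err
}

// ...and the two front-ends only differ in which caller they embed.
type immediateCaller struct{}

func (immediateCaller) Call(m string, p map[string]interface{}, r interface{}) (interface{}, error) {
	fmt.Println("sending now:", m)
	return nil, nil
}

type batchCaller struct{ queued []string }

func (b *batchCaller) Call(m string, p map[string]interface{}, r interface{}) (interface{}, error) {
	b.queued = append(b.queued, m) // deferred until a Send()
	return nil, nil
}

func main() {
	plain := &baseClient{c: immediateCaller{}}
	batch := &baseClient{c: &batchCaller{}}
	_ = plain.Status() // goes out immediately
	_ = batch.Status() // only enqueued
}
```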
+func (c *HTTP) NewBatch() *BatchHTTP { + rpcBatch := c.rpc.NewRequestBatch() + return &BatchHTTP{ + rpcBatch: rpcBatch, + baseRPCClient: &baseRPCClient{ + caller: rpcBatch, + }, + } +} + +//----------------------------------------------------------------------------- +// BatchHTTP + +// Send is a convenience function for an HTTP batch that will trigger the +// compilation of the batched requests and send them off using the client as a +// single request. On success, this returns a list of the deserialized results +// from each request in the sent batch. +func (b *BatchHTTP) Send() ([]interface{}, error) { + return b.rpcBatch.Send() +} + +// Clear will empty out this batch of requests and return the number of requests +// that were cleared out. +func (b *BatchHTTP) Clear() int { + return b.rpcBatch.Clear() +} + +// Count returns the number of enqueued requests waiting to be sent. +func (b *BatchHTTP) Count() int { + return b.rpcBatch.Count() +} + +//----------------------------------------------------------------------------- +// baseRPCClient + +func (c *baseRPCClient) Status() (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) - _, err := c.rpc.Call("status", map[string]interface{}{}, result) + _, err := c.caller.Call("status", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "Status") } return result, nil } -func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { result := new(ctypes.ResultABCIInfo) - _, err := c.rpc.Call("abci_info", map[string]interface{}{}, result) + _, err := c.caller.Call("abci_info", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "ABCIInfo") } return result, nil } -func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c *baseRPCClient) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) } -func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { +func (c *baseRPCClient) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) - _, err := c.rpc.Call("abci_query", + _, err := c.caller.Call("abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { @@ -87,89 +169,89 @@ func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQue return result, nil } -func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *baseRPCClient) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { result := new(ctypes.ResultBroadcastTxCommit) - _, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) if err != nil { return nil, errors.Wrap(err, "broadcast_tx_commit") } return result, nil } -func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.broadcastTX("broadcast_tx_async", tx) } -func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) 
{ return c.broadcastTX("broadcast_tx_sync", tx) } -func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { result := new(ctypes.ResultBroadcastTx) - _, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(route, map[string]interface{}{"tx": tx}, result) if err != nil { return nil, errors.Wrap(err, route) } return result, nil } -func (c *HTTP) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.rpc.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result) + _, err := c.caller.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result) if err != nil { return nil, errors.Wrap(err, "unconfirmed_txs") } return result, nil } -func (c *HTTP) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.rpc.Call("num_unconfirmed_txs", map[string]interface{}{}, result) + _, err := c.caller.Call("num_unconfirmed_txs", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "num_unconfirmed_txs") } return result, nil } -func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) { +func (c *baseRPCClient) NetInfo() (*ctypes.ResultNetInfo, error) { result := new(ctypes.ResultNetInfo) - _, err := c.rpc.Call("net_info", map[string]interface{}{}, result) + _, err := c.caller.Call("net_info", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "NetInfo") } return result, nil } -func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func (c *baseRPCClient) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { result := new(ctypes.ResultDumpConsensusState) - _, err := c.rpc.Call("dump_consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call("dump_consensus_state", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "DumpConsensusState") } return result, nil } -func (c *HTTP) ConsensusState() (*ctypes.ResultConsensusState, error) { +func (c *baseRPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) { result := new(ctypes.ResultConsensusState) - _, err := c.rpc.Call("consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call("consensus_state", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "ConsensusState") } return result, nil } -func (c *HTTP) Health() (*ctypes.ResultHealth, error) { +func (c *baseRPCClient) Health() (*ctypes.ResultHealth, error) { result := new(ctypes.ResultHealth) - _, err := c.rpc.Call("health", map[string]interface{}{}, result) + _, err := c.caller.Call("health", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "Health") } return result, nil } -func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *baseRPCClient) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { result := new(ctypes.ResultBlockchainInfo) - _, err := c.rpc.Call("blockchain", + _, err := c.caller.Call("blockchain", map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, result) if err != nil { @@ -178,65 
+260,65 @@ func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockch return result, nil } -func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) { +func (c *baseRPCClient) Genesis() (*ctypes.ResultGenesis, error) { result := new(ctypes.ResultGenesis) - _, err := c.rpc.Call("genesis", map[string]interface{}{}, result) + _, err := c.caller.Call("genesis", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "Genesis") } return result, nil } -func (c *HTTP) Block(height *int64) (*ctypes.ResultBlock, error) { +func (c *baseRPCClient) Block(height *int64) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) - _, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("block", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Block") } return result, nil } -func (c *HTTP) BlockByHash(blockHash []byte) (*ctypes.ResultBlock, error) { +func (c *baseRPCClient) BlockByHash(blockHash []byte) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) - _, err := c.rpc.Call("block_by_hash", map[string]interface{}{"hash": blockHash}, result) + _, err := c.caller.Call("block_by_hash", map[string]interface{}{"hash": blockHash}, result) if err != nil { return nil, errors.Wrap(err, "Block_by_hash") } return result, nil } -func (c *HTTP) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { +func (c *baseRPCClient) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { result := new(ctypes.ResultBlockResults) - _, err := c.rpc.Call("block_results", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("block_results", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Block Result") } return result, nil } -func (c *HTTP) Commit(height *int64) (*ctypes.ResultCommit, error) { +func (c *baseRPCClient) Commit(height *int64) (*ctypes.ResultCommit, error) { result := new(ctypes.ResultCommit) - _, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("commit", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Commit") } return result, nil } -func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *baseRPCClient) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { result := new(ctypes.ResultTx) params := map[string]interface{}{ "hash": hash, "prove": prove, } - _, err := c.rpc.Call("tx", params, result) + _, err := c.caller.Call("tx", params, result) if err != nil { return nil, errors.Wrap(err, "Tx") } return result, nil } -func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { +func (c *baseRPCClient) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { result := new(ctypes.ResultTxSearch) params := map[string]interface{}{ "query": query, @@ -244,23 +326,33 @@ func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.Re "page": page, "per_page": perPage, } - _, err := c.rpc.Call("tx_search", params, result) + _, err := c.caller.Call("tx_search", params, result) if err != nil { return nil, errors.Wrap(err, "TxSearch") } return result, nil } -func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) { +func (c *baseRPCClient) Validators(height *int64) (*ctypes.ResultValidators, error) { result := new(ctypes.ResultValidators) - _, 
err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result)
+	_, err := c.caller.Call("validators", map[string]interface{}{"height": height}, result)
 	if err != nil {
 		return nil, errors.Wrap(err, "Validators")
 	}
 	return result, nil
 }
 
-/** websocket event stuff here... **/
+func (c *baseRPCClient) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+	result := new(ctypes.ResultBroadcastEvidence)
+	_, err := c.caller.Call("broadcast_evidence", map[string]interface{}{"evidence": ev}, result)
+	if err != nil {
+		return nil, errors.Wrap(err, "BroadcastEvidence")
+	}
+	return result, nil
+}
+
+//-----------------------------------------------------------------------------
+// WSEvents
 
 type WSEvents struct {
 	cmn.BaseService
@@ -370,6 +462,8 @@ func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error
 func (w *WSEvents) redoSubscriptionsAfter(d time.Duration) {
 	time.Sleep(d)
 
+	w.mtx.RLock()
+	defer w.mtx.RUnlock()
 	for q := range w.subscriptions {
 		err := w.ws.Subscribe(context.Background(), q)
 		if err != nil {
diff --git a/rpc/client/interface.go b/rpc/client/interface.go
index 6807bc4aa..0352d100d 100644
--- a/rpc/client/interface.go
+++ b/rpc/client/interface.go
@@ -28,9 +28,24 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-// ABCIClient groups together the functionality that principally
-// affects the ABCI app. In many cases this will be all we want,
-// so we can accept an interface which is easier to mock
+// Client wraps the most important rpc calls a client would make. If you want
+// to listen for events, test if it also implements events.EventSwitch.
+type Client interface {
+	cmn.Service
+	ABCIClient
+	EventsClient
+	HistoryClient
+	NetworkClient
+	SignClient
+	StatusClient
+	EvidenceClient
+}
+
+// ABCIClient groups together the functionality that principally affects the
+// ABCI app.
+//
+// In many cases this will be all we want, so we can accept an interface which
+// is easier to mock.
 type ABCIClient interface {
 	// Reading from abci app
 	ABCIInfo() (*ctypes.ResultABCIInfo, error)
@@ -44,8 +59,8 @@ type ABCIClient interface {
 	BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
 }
 
-// SignClient groups together the interfaces need to get valid
-// signatures and prove anything about the chain
+// SignClient groups together the functionality needed to get valid signatures
+// and prove anything about the chain.
 type SignClient interface {
 	Block(height *int64) (*ctypes.ResultBlock, error)
 	BlockByHash(hash []byte) (*ctypes.ResultBlock, error)
@@ -56,32 +71,19 @@ type SignClient interface {
 	TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error)
 }
 
-// HistoryClient shows us data from genesis to now in large chunks.
+// HistoryClient provides access to data from genesis to now in large chunks.
 type HistoryClient interface {
 	Genesis() (*ctypes.ResultGenesis, error)
 	BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
 }
 
+// StatusClient provides access to general chain info.
 type StatusClient interface {
-	// General chain info
 	Status() (*ctypes.ResultStatus, error)
 }
 
-// Client wraps most important rpc calls a client would make
-// if you want to listen for events, test if it also
-// implements events.EventSwitch
-type Client interface {
-	cmn.Service
-	ABCIClient
-	EventsClient
-	HistoryClient
-	NetworkClient
-	SignClient
-	StatusClient
-}
-
-// NetworkClient is general info about the network state. May not
-// be needed usually.
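The two lines added to redoSubscriptionsAfter above close a small race: the subscriptions map is written by Subscribe/Unsubscribe from other goroutines, and iterating a Go map during a concurrent write is a runtime error. The guarded-iteration pattern in isolation (a generic sketch, not the WSEvents code):

```go
package main

import (
	"fmt"
	"sync"
)

type subs struct {
	mtx           sync.RWMutex
	subscriptions map[string]struct{}
}

func (s *subs) add(q string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.subscriptions[q] = struct{}{}
}

// redo iterates under the read lock, exactly like the patched
// redoSubscriptionsAfter; writers block until the iteration finishes.
func (s *subs) redo() {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	for q := range s.subscriptions {
		fmt.Println("resubscribing:", q)
	}
}

func main() {
	s := &subs{subscriptions: make(map[string]struct{})}
	s.add("tm.event = 'NewBlock'")
	s.redo()
}
```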
+// NetworkClient is general info about the network state. May not be needed
+// usually.
 type NetworkClient interface {
 	NetInfo() (*ctypes.ResultNetInfo, error)
 	DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
@@ -111,3 +113,9 @@ type MempoolClient interface {
 	UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error)
 	NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error)
 }
+
+// EvidenceClient is used for submitting evidence of malicious
+// behaviour.
+type EvidenceClient interface {
+	BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+}
diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go
index 3cba893de..136f228d0 100644
--- a/rpc/client/localclient.go
+++ b/rpc/client/localclient.go
@@ -161,6 +161,10 @@ func (c *Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.R
 	return core.TxSearch(c.ctx, query, prove, page, perPage)
 }
 
+func (c *Local) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+	return core.BroadcastEvidence(c.ctx, ev)
+}
+
 func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
 	q, err := tmquery.New(query)
 	if err != nil {
@@ -186,7 +190,7 @@ func (c *Local) eventsRoutine(sub types.Subscription, subscriber string, q tmpub
 	for {
 		select {
 		case msg := <-sub.Out():
-			result := ctypes.ResultEvent{Query: q.String(), Data: msg.Data(), Tags: msg.Tags()}
+			result := ctypes.ResultEvent{Query: q.String(), Data: msg.Data(), Events: msg.Events()}
 			if cap(outc) == 0 {
 				outc <- result
 			} else {
diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go
index 6ec7b7b0e..d600b32f8 100644
--- a/rpc/client/main_test.go
+++ b/rpc/client/main_test.go
@@ -1,6 +1,7 @@
 package client_test
 
 import (
+	"io/ioutil"
 	"os"
 	"testing"
 
@@ -13,7 +14,11 @@ var node *nm.Node
 
 func TestMain(m *testing.M) {
 	// start a tendermint node (and kvstore) in the background to test against
-	app := kvstore.NewKVStoreApplication()
+	dir, err := ioutil.TempDir("/tmp", "rpc-client-test")
+	if err != nil {
+		panic(err)
+	}
+	app := kvstore.NewPersistentKVStoreApplication(dir)
 	node = rpctest.StartTendermint(app)
 
 	code := m.Run()
diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go
index 2ab62a420..f40755fec 100644
--- a/rpc/client/mock/abci.go
+++ b/rpc/client/mock/abci.go
@@ -45,29 +45,29 @@ func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clien
 // TODO: Make it wait for a commit and set res.Height appropriately.
 func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
 	res := ctypes.ResultBroadcastTxCommit{}
-	res.CheckTx = a.App.CheckTx(tx)
+	res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 	if res.CheckTx.IsErr() {
 		return &res, nil
 	}
-	res.DeliverTx = a.App.DeliverTx(tx)
+	res.DeliverTx = a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx})
 	res.Height = -1 // TODO
 	return &res, nil
 }
 
 func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	c := a.App.CheckTx(tx)
+	c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
 	// and this gets written in a background thread...
if !c.IsErr() { - go func() { a.App.DeliverTx(tx) }() // nolint: errcheck + go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck } return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil } func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - c := a.App.CheckTx(tx) + c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { - go func() { a.App.DeliverTx(tx) }() // nolint: errcheck + go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck } return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 5a00c2791..08a1539f3 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -36,6 +36,7 @@ type Client struct { client.HistoryClient client.StatusClient client.EventsClient + client.EvidenceClient cmn.Service } @@ -151,3 +152,7 @@ func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) { return core.Validators(&rpctypes.Context{}, height) } + +func (c Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + return core.BroadcastEvidence(&rpctypes.Context{}, ev) +} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index ba9bc3af7..d87650df7 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,9 +1,12 @@ package client_test import ( + "bytes" "fmt" + "math/rand" "net/http" "strings" + "sync" "testing" "github.com/stretchr/testify/assert" @@ -11,7 +14,12 @@ import ( abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) @@ -249,7 +257,8 @@ func TestAppCalls(t *testing.T) { func TestBroadcastTxSync(t *testing.T) { require := require.New(t) - mempool := node.MempoolReactor().Mempool + // TODO (melekes): use mempool which is set on RPC rather than getting it from node + mempool := node.Mempool() initMempoolSize := mempool.Size() for i, c := range GetClients() { @@ -269,7 +278,7 @@ func TestBroadcastTxSync(t *testing.T) { func TestBroadcastTxCommit(t *testing.T) { require := require.New(t) - mempool := node.MempoolReactor().Mempool + mempool := node.Mempool() for i, c := range GetClients() { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(tx) @@ -284,7 +293,7 @@ func TestBroadcastTxCommit(t *testing.T) { func TestUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() - mempool := node.MempoolReactor().Mempool + mempool := node.Mempool() _ = mempool.CheckTx(tx, nil) for i, c := range GetClients() { @@ -305,7 +314,7 @@ func TestUnconfirmedTxs(t *testing.T) { func TestNumUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() - mempool := node.MempoolReactor().Mempool + mempool := node.Mempool() _ = mempool.CheckTx(tx, nil) mempoolSize := mempool.Size() @@ -422,22 +431,258 @@ func TestTxSearch(t *testing.T) { require.Len(t, result.Txs, 0) // query using a tag (see kvstore application) - result, err = c.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 
30) + result, err = c.TxSearch("creator='Cosmoshi Netowoko'", false, 1, 30) require.Nil(t, err, "%+v", err) if len(result.Txs) == 0 { t.Fatal("expected a lot of transactions") } // query using a tag (see kvstore application) and height - result, err = c.TxSearch("app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, 1, 30) + result, err = c.TxSearch("creator='Cosmoshi Netowoko' AND tx.height<10000", true, 1, 30) require.Nil(t, err, "%+v", err) if len(result.Txs) == 0 { t.Fatal("expected a lot of transactions") } // query a non existing tx with page 1 and txsPerPage 1 - result, err = c.TxSearch("app.creator='Cosmoshi Neetowoko'", true, 1, 1) + result, err = c.TxSearch("creator='Cosmoshi Neetowoko'", true, 1, 1) require.Nil(t, err, "%+v", err) require.Len(t, result.Txs, 0) } } + +func deepcpVote(vote *types.Vote) (res *types.Vote) { + res = &types.Vote{ + ValidatorAddress: make([]byte, len(vote.ValidatorAddress)), + ValidatorIndex: vote.ValidatorIndex, + Height: vote.Height, + Round: vote.Round, + Type: vote.Type, + BlockID: types.BlockID{ + Hash: make([]byte, len(vote.BlockID.Hash)), + PartsHeader: vote.BlockID.PartsHeader, + }, + Signature: make([]byte, len(vote.Signature)), + } + copy(res.ValidatorAddress, vote.ValidatorAddress) + copy(res.BlockID.Hash, vote.BlockID.Hash) + copy(res.Signature, vote.Signature) + return +} + +func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, chainID string) types.DuplicateVoteEvidence { + var err error + vote2_ := deepcpVote(vote2) + vote2_.Signature, err = val.Key.PrivKey.Sign(vote2_.SignBytes(chainID)) + require.NoError(t, err) + + return types.DuplicateVoteEvidence{ + PubKey: val.Key.PubKey, + VoteA: vote, + VoteB: vote2_, + } +} + +func makeEvidences(t *testing.T, val *privval.FilePV, chainID string) (ev types.DuplicateVoteEvidence, fakes []types.DuplicateVoteEvidence) { + vote := &types.Vote{ + ValidatorAddress: val.Key.Address, + ValidatorIndex: 0, + Height: 1, + Round: 0, + Type: types.PrevoteType, + BlockID: types.BlockID{ + Hash: tmhash.Sum([]byte("blockhash")), + PartsHeader: types.PartSetHeader{ + Total: 1000, + Hash: tmhash.Sum([]byte("partset")), + }, + }, + } + + var err error + vote.Signature, err = val.Key.PrivKey.Sign(vote.SignBytes(chainID)) + require.NoError(t, err) + + vote2 := deepcpVote(vote) + vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2")) + + ev = newEvidence(t, val, vote, vote2, chainID) + + fakes = make([]types.DuplicateVoteEvidence, 42) + + // different address + vote2 = deepcpVote(vote) + for i := 0; i < 10; i++ { + rand.Read(vote2.ValidatorAddress) // nolint: gosec + fakes[i] = newEvidence(t, val, vote, vote2, chainID) + } + // different index + vote2 = deepcpVote(vote) + for i := 10; i < 20; i++ { + vote2.ValidatorIndex = rand.Int()%100 + 1 // nolint: gosec + fakes[i] = newEvidence(t, val, vote, vote2, chainID) + } + // different height + vote2 = deepcpVote(vote) + for i := 20; i < 30; i++ { + vote2.Height = rand.Int63()%1000 + 100 // nolint: gosec + fakes[i] = newEvidence(t, val, vote, vote2, chainID) + } + // different round + vote2 = deepcpVote(vote) + for i := 30; i < 40; i++ { + vote2.Round = rand.Int()%10 + 1 // nolint: gosec + fakes[i] = newEvidence(t, val, vote, vote2, chainID) + } + // different type + vote2 = deepcpVote(vote) + vote2.Type = types.PrecommitType + fakes[40] = newEvidence(t, val, vote, vote2, chainID) + // exactly same vote + vote2 = deepcpVote(vote) + fakes[41] = newEvidence(t, val, vote, vote2, chainID) + return +} + +func 
TestBroadcastEvidenceDuplicateVote(t *testing.T) { + config := rpctest.GetConfig() + chainID := config.ChainID() + pvKeyFile := config.PrivValidatorKeyFile() + pvKeyStateFile := config.PrivValidatorStateFile() + pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) + + ev, fakes := makeEvidences(t, pv, chainID) + + t.Logf("evidence %v", ev) + + for i, c := range GetClients() { + t.Logf("client %d", i) + + result, err := c.BroadcastEvidence(&types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB}) + require.Nil(t, err) + require.Equal(t, ev.Hash(), result.Hash, "Invalid response, result %+v", result) + + status, err := c.Status() + require.NoError(t, err) + client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) + + ed25519pub := ev.PubKey.(ed25519.PubKeyEd25519) + rawpub := ed25519pub[:] + result2, err := c.ABCIQuery("/val", rawpub) + require.Nil(t, err, "Error querying evidence, err %v", err) + qres := result2.Response + require.True(t, qres.IsOK(), "Response not OK") + + var v abci.ValidatorUpdate + err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) + require.NoError(t, err, "Error reading query result, value %v", qres.Value) + + require.EqualValues(t, rawpub, v.PubKey.Data, "Stored PubKey not equal with expected, value %v", string(qres.Value)) + require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) + + for _, fake := range fakes { + _, err := c.BroadcastEvidence(&types.DuplicateVoteEvidence{ + PubKey: fake.PubKey, + VoteA: fake.VoteA, + VoteB: fake.VoteB}) + require.Error(t, err, "Broadcasting fake evidence succeed: %s", fake.String()) + } + } +} + +func TestBatchedJSONRPCCalls(t *testing.T) { + c := getHTTPClient() + testBatchedJSONRPCCalls(t, c) +} + +func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) { + k1, v1, tx1 := MakeTxKV() + k2, v2, tx2 := MakeTxKV() + + batch := c.NewBatch() + r1, err := batch.BroadcastTxCommit(tx1) + require.NoError(t, err) + r2, err := batch.BroadcastTxCommit(tx2) + require.NoError(t, err) + require.Equal(t, 2, batch.Count()) + bresults, err := batch.Send() + require.NoError(t, err) + require.Len(t, bresults, 2) + require.Equal(t, 0, batch.Count()) + + bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + require.Equal(t, *bresult1, *r1) + bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + require.Equal(t, *bresult2, *r2) + apph := cmn.MaxInt64(bresult1.Height, bresult2.Height) + 1 + + client.WaitForHeight(c, apph, nil) + + q1, err := batch.ABCIQuery("/key", k1) + require.NoError(t, err) + q2, err := batch.ABCIQuery("/key", k2) + require.NoError(t, err) + require.Equal(t, 2, batch.Count()) + qresults, err := batch.Send() + require.NoError(t, err) + require.Len(t, qresults, 2) + require.Equal(t, 0, batch.Count()) + + qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery) + require.True(t, ok) + require.Equal(t, *qresult1, *q1) + qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery) + require.True(t, ok) + require.Equal(t, *qresult2, *q2) + + require.Equal(t, qresult1.Response.Key, k1) + require.Equal(t, qresult2.Response.Key, k2) + require.Equal(t, qresult1.Response.Value, v1) + require.Equal(t, qresult2.Response.Value, v2) +} + +func TestBatchedJSONRPCCallsCancellation(t *testing.T) { + c := getHTTPClient() + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(tx1) + require.NoError(t, err) + _, err = batch.BroadcastTxCommit(tx2) + 
require.NoError(t, err) + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) +} + +func TestSendingEmptyJSONRPCRequestBatch(t *testing.T) { + c := getHTTPClient() + batch := c.NewBatch() + _, err := batch.Send() + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") +} + +func TestClearingEmptyJSONRPCRequestBatch(t *testing.T) { + c := getHTTPClient() + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") +} + +func TestConcurrentJSONRPCBatching(t *testing.T) { + var wg sync.WaitGroup + c := getHTTPClient() + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(t, c) + }() + } + wg.Wait() +} diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index ecf43073f..27e5171d9 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -456,7 +456,8 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // If no height is provided, it will fetch results for the latest block. // // Results are for the height of the block containing the txs. -// Thus response.results[5] is the results of executing getBlock(h).Txs[5] +// Thus response.results.deliver_tx[5] is the results of executing +// getBlock(h).Txs[5] // // ```shell // curl 'localhost:27147/block_results?height=10' @@ -477,17 +478,27 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // // ```json // { -// "height": "10", -// "results": [ -// { -// "code": "0", -// "data": "CAFE00F00D" -// }, -// { -// "code": "102", -// "data": "" +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "height": "39", +// "results": { +// "deliver_tx": [ +// { +// "tags": [ +// { +// "key": "YXBwLmNyZWF0b3I=", +// "value": "Q29zbW9zaGkgTmV0b3dva28=" +// } +// ] +// } +// ], +// "end_block": { +// "validator_updates": null +// }, +// "begin_block": {} +// } // } -// ] // } // ``` func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { @@ -497,7 +508,6 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR return nil, err } - // load the results results, err := sm.LoadABCIResponses(stateDB, height) if err != nil { return nil, err diff --git a/rpc/core/events.go b/rpc/core/events.go index 410cad101..5f82fca7f 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -22,26 +22,83 @@ import ( // string (escaped with single quotes), number, date or time. // // Examples: -// tm.event = 'NewBlock' # new blocks -// tm.event = 'CompleteProposal' # node got a complete proposal +// tm.event = 'NewBlock' # new blocks +// tm.event = 'CompleteProposal' # node got a complete proposal // tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction -// tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block -// tx.height = 5 # all txs of the fifth block +// tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block +// tx.height = 5 # all txs of the fifth block // // Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height. -// Note for transactions, you can define additional keys by providing tags with +// Note for transactions, you can define additional keys by providing events with // DeliverTx response. 
 //
-// DeliverTx{
-// 	Tags: []*KVPair{
-// 		"agent.name": "K",
-// 	}
-// }
+// import (
+// 	abci "github.com/tendermint/tendermint/abci/types"
+// 	"github.com/tendermint/tendermint/libs/pubsub/query"
+// )
 //
-// tm.event = 'Tx' AND agent.name = 'K'
-// tm.event = 'Tx' AND account.created_at >= TIME 2013-05-03T14:45:00Z
-// tm.event = 'Tx' AND contract.sign_date = DATE 2017-01-01
-// tm.event = 'Tx' AND account.owner CONTAINS 'Igor'
+// abci.ResponseDeliverTx{
+// 	Events: []abci.Event{
+// 		{
+// 			Type: "rewards.withdraw",
+// 			Attributes: cmn.KVPairs{
+// 				cmn.KVPair{Key: []byte("address"), Value: []byte("AddrA")},
+// 				cmn.KVPair{Key: []byte("source"), Value: []byte("SrcX")},
+// 				cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
+// 				cmn.KVPair{Key: []byte("balance"), Value: []byte("...")},
+// 			},
+// 		},
+// 		{
+// 			Type: "rewards.withdraw",
+// 			Attributes: cmn.KVPairs{
+// 				cmn.KVPair{Key: []byte("address"), Value: []byte("AddrB")},
+// 				cmn.KVPair{Key: []byte("source"), Value: []byte("SrcY")},
+// 				cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
+// 				cmn.KVPair{Key: []byte("balance"), Value: []byte("...")},
+// 			},
+// 		},
+// 		{
+// 			Type: "transfer",
+// 			Attributes: cmn.KVPairs{
+// 				cmn.KVPair{Key: []byte("sender"), Value: []byte("AddrC")},
+// 				cmn.KVPair{Key: []byte("recipient"), Value: []byte("AddrD")},
+// 				cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
+// 			},
+// 		},
+// 	},
+// }
+//
+// All events are indexed by a composite key of the form {eventType}.{eventAttrKey}.
+// In the above examples, the following keys would be indexed:
+// 	- rewards.withdraw.address
+// 	- rewards.withdraw.source
+// 	- rewards.withdraw.amount
+// 	- rewards.withdraw.balance
+// 	- transfer.sender
+// 	- transfer.recipient
+// 	- transfer.amount
+//
+// Multiple event types with duplicate keys are allowed and are meant to
+// categorize unique and distinct events. In the above example, all events
+// indexed under the key `rewards.withdraw.address` will have the following
+// values stored and queryable:
+//
+// 	- AddrA
+// 	- AddrB
+//
+// To create a query for txs where address AddrA withdrew rewards:
+// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'")
+//
+// To create a query for txs where address AddrA withdrew rewards from source SrcX:
+// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'SrcX'")
+//
+// To create a query for txs where AddrA transferred funds:
+// query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrA'")
+//
+// The following queries would return no results:
+// query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrZ'")
+// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'")
+// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.source = 'W'")
 //
 // See list of all possible events here
 // https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants
@@ -50,7 +107,6 @@ import (
 // https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query.
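On the consuming side, the Tags to Events change (visible in ResultEvent later in this diff) means a subscriber now receives these composite keys as a map[string][]string, one slice entry per matching event. A hedged sketch of reading them back, assuming the HTTP client from this package and the example data above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
	if err := c.Start(); err != nil {
		panic(err)
	}
	defer c.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Match txs carrying a rewards.withdraw event for AddrA.
	events, err := c.Subscribe(ctx, "test-client",
		"tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'")
	if err != nil {
		panic(err)
	}

	for e := range events {
		// Events is keyed by {eventType}.{eventAttrKey}; a single tx may
		// carry several events of the same type, hence the []string values.
		for _, src := range e.Events["rewards.withdraw.source"] {
			fmt.Println("AddrA withdrew from source:", src)
		}
	}
}
```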
// // ```go -// import "github.com/tendermint/tendermint/libs/pubsub/query" // import "github.com/tendermint/tendermint/types" // // client := client.NewHTTP("tcp://0.0.0.0:27147", "/websocket") @@ -59,15 +115,17 @@ import ( // // handle error // } // defer client.Stop() -// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) // defer cancel() -// query := query.MustParse("tm.event = 'Tx' AND tx.height = 3") -// txs := make(chan interface{}) -// err = client.Subscribe(ctx, "test-client", query, txs) +// query := "tm.event = 'Tx' AND tx.height = 3" +// txs, err := client.Subscribe(ctx, "test-client", query) +// if err != nil { +// // handle error +// } // // go func() { // for e := range txs { -// fmt.Println("got ", e.(types.EventDataTx)) +// fmt.Println("got ", e.Data.(types.EventDataTx)) // } // }() // ``` @@ -105,8 +163,10 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er if err != nil { return nil, errors.Wrap(err, "failed to parse query") } + subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() + sub, err := eventBus.Subscribe(subCtx, addr, q) if err != nil { return nil, err @@ -116,7 +176,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er for { select { case msg := <-sub.Out(): - resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Tags: msg.Tags()} + resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} ctx.WSConn.TryWriteRPCResponse( rpctypes.NewRPCSuccessResponse( ctx.WSConn.Codec(), @@ -154,7 +214,11 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er // // handle error // } // defer client.Stop() -// err = client.Unsubscribe("test-client", query) +// query := "tm.event = 'Tx' AND tx.height = 3" +// err = client.Unsubscribe(context.Background(), "test-client", query) +// if err != nil { +// // handle error +// } // ``` // // > The above command returns JSON structured like this: @@ -198,7 +262,10 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe // // handle error // } // defer client.Stop() -// err = client.UnsubscribeAll("test-client") +// err = client.UnsubscribeAll(context.Background(), "test-client") +// if err != nil { +// // handle error +// } // ``` // // > The above command returns JSON structured like this: diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go new file mode 100644 index 000000000..b2dfd097f --- /dev/null +++ b/rpc/core/evidence.go @@ -0,0 +1,39 @@ +package core + +import ( + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + "github.com/tendermint/tendermint/types" +) + +// Broadcast evidence of the misbehavior. 
+//
+// ```shell
+// curl 'localhost:26657/broadcast_evidence?evidence={amino-encoded DuplicateVoteEvidence}'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// err := client.Start()
+// if err != nil {
+// 	// handle error
+// }
+// defer client.Stop()
+// res, err := client.BroadcastEvidence(&types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB})
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// ```
+//
+// | Parameter | Type           | Default | Required | Description                 |
+// |-----------+----------------+---------+----------+-----------------------------|
+// | evidence  | types.Evidence | nil     | true     | Amino-encoded JSON evidence |
+func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+	err := evidencePool.AddEvidence(ev)
+	if err != nil {
+		return nil, err
+	}
+	return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil
+}
diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go
index 7642b60b0..1523c7996 100644
--- a/rpc/core/mempool.go
+++ b/rpc/core/mempool.go
@@ -19,6 +19,19 @@ import (
 // Returns right away, with no response. Does not wait for CheckTx nor
 // DeliverTx results.
 //
+// If you want to be sure that the transaction is included in a block, you can
+// subscribe for the result using JSONRPC via a websocket. See
+// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html
+// If you haven't received anything after a couple of blocks, resend it. If the
+// same happens again, send it to some other node. A few reasons why it could
+// happen:
+//
+// 1. malicious node can drop or pretend it had committed your tx
+// 2. malicious proposer (not necessarily the one you're communicating with) can
+//    drop transactions, which might become valid in the future
+//    (https://github.com/tendermint/tendermint/issues/3322)
+// 3. node can be offline
+//
 // Please refer to
 // https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
 // for formatting/encoding rules.
@@ -69,6 +82,18 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca
 // Returns with the response from CheckTx. Does not wait for DeliverTx result.
 //
+// If you want to be sure that the transaction is included in a block, you can
+// subscribe for the result using JSONRPC via a websocket. See
+// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html
+// If you haven't received anything after a couple of blocks, resend it. If the
+// same happens again, send it to some other node. A few reasons why it could
+// happen:
+//
+// 1. malicious node can drop or pretend it had committed your tx
+// 2. malicious proposer (not necessarily the one you're communicating with) can
+//    drop transactions, which might become valid in the future
+//    (https://github.com/tendermint/tendermint/issues/3322)
+//
 // Please refer to
 // https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
 // for formatting/encoding rules.
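The resend guidance added above can be folded into a small client-side loop: broadcast with BroadcastTxSync, watch for the matching Tx event over a websocket subscription, and resend if nothing arrives within a couple of blocks. The sketch below layers assumptions on APIs shown elsewhere in this diff; the tx.hash query format, the retry count, and the timeouts are illustrative choices, not prescribed by the patch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

// broadcastWithConfirm resends a tx until its Tx event is seen or the
// attempts run out; in a real client the final fallback would be to try
// a different node, per the guidance above.
func broadcastWithConfirm(c *client.HTTP, tx types.Tx) error {
	query := fmt.Sprintf("tm.event = 'Tx' AND tx.hash = '%X'", tx.Hash())

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	events, err := c.Subscribe(ctx, "tx-waiter", query)
	if err != nil {
		return err
	}
	defer c.UnsubscribeAll(context.Background(), "tx-waiter")

	for attempt := 0; attempt < 3; attempt++ {
		if _, err := c.BroadcastTxSync(tx); err != nil {
			return err
		}
		select {
		case <-events:
			return nil // the tx made it into a block
		case <-time.After(15 * time.Second): // roughly "a couple of blocks"
			// fall through and resend
		}
	}
	return errors.New("tx not committed here; try another node")
}

func main() {
	c := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
	if err := c.Start(); err != nil {
		panic(err)
	}
	defer c.Stop()

	if err := broadcastWithConfirm(c, types.Tx("name=satoshi")); err != nil {
		panic(err)
	}
}
```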
diff --git a/rpc/core/net.go b/rpc/core/net.go index 23bc40e88..165230619 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -184,10 +184,8 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided") } - // starts go routines to dial each peer after random delays - logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds) - err := p2pPeers.DialPeersAsync(addrBook, seeds, false) - if err != nil { + logger.Info("DialSeeds", "seeds", seeds) + if err := p2pPeers.DialPeersAsync(seeds); err != nil { return &ctypes.ResultDialSeeds{}, err } return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil @@ -197,10 +195,13 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent bool) (*c if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("No peers provided") } - // starts go routines to dial each peer after random delays - logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent) - err := p2pPeers.DialPeersAsync(addrBook, peers, persistent) - if err != nil { + logger.Info("DialPeers", "peers", peers, "persistent", persistent) + if persistent { + if err := p2pPeers.AddPersistentPeers(peers); err != nil { + return &ctypes.ResultDialPeers{}, err + } + } + if err := p2pPeers.DialPeersAsync(peers); err != nil { return &ctypes.ResultDialPeers{}, err } return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go new file mode 100644 index 000000000..651e1f69d --- /dev/null +++ b/rpc/core/net_test.go @@ -0,0 +1,73 @@ +package core + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" +) + +func TestUnsafeDialSeeds(t *testing.T) { + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + logger = log.TestingLogger() + p2pPeers = sw + + testCases := []struct { + seeds []string + isErr bool + }{ + {[]string{}, true}, + {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, + {[]string{"127.0.0.1:41198"}, true}, + } + + for _, tc := range testCases { + res, err := UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) + if tc.isErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, res) + } + } +} + +func TestUnsafeDialPeers(t *testing.T) { + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + logger = log.TestingLogger() + p2pPeers = sw + + testCases := []struct { + peers []string + isErr bool + }{ + {[]string{}, true}, + {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, + {[]string{"127.0.0.1:41198"}, true}, + } + + for _, tc := range testCases { + res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, false) + if tc.isErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, res) + } + } +} diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 71f2045ec..caca655fb 100644 
--- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -1,6 +1,7 @@ package core import ( + "fmt" "time" cfg "github.com/tendermint/tendermint/config" @@ -47,7 +48,8 @@ type transport interface { } type peers interface { - DialPeersAsync(p2p.AddrBook, []string, bool) error + AddPersistentPeers([]string) error + DialPeersAsync([]string) error NumPeers() (outbound, inbound, dialig int) Peers() p2p.IPeerSet } @@ -76,8 +78,8 @@ var ( blockIndexer blockindex.BlockIndexer consensusReactor *consensus.ConsensusReactor eventBus *types.EventBus // thread safe - mempool *mempl.Mempool indexerHub *sm.IndexHub + mempool mempl.Mempool logger log.Logger @@ -92,7 +94,7 @@ func SetBlockStore(bs sm.BlockStore) { blockStore = bs } -func SetMempool(mem *mempl.Mempool) { +func SetMempool(mem mempl.Mempool) { mempool = mem } @@ -157,19 +159,24 @@ func SetConfig(c cfg.RPCConfig) { config = c } -func validatePage(page, perPage, totalCount int) int { +func validatePage(page, perPage, totalCount int) (int, error) { if perPage < 1 { - return 1 + panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) + } + + if page == 0 { + return 1, nil // default } pages := ((totalCount - 1) / perPage) + 1 - if page < 1 { - page = 1 - } else if page > pages { - page = pages + if pages == 0 { + pages = 1 // one page (even if it's empty) + } + if page < 0 || page > pages { + return 1, fmt.Errorf("page should be within [0, %d] range, given %d", pages, page) } - return page + return page, nil } func validatePerPage(perPage int) int { diff --git a/rpc/core/pipe_test.go b/rpc/core/pipe_test.go index fd58695e7..168c69c39 100644 --- a/rpc/core/pipe_test.go +++ b/rpc/core/pipe_test.go @@ -16,33 +16,39 @@ func TestPaginationPage(t *testing.T) { perPage int page int newPage int + expErr bool }{ - {0, 0, 1, 1}, + {0, 10, 1, 1, false}, - {0, 10, 0, 1}, - {0, 10, 1, 1}, - {0, 10, 2, 1}, + {0, 10, 0, 1, false}, + {0, 10, 1, 1, false}, + {0, 10, 2, 0, true}, - {5, 10, -1, 1}, - {5, 10, 0, 1}, - {5, 10, 1, 1}, - {5, 10, 2, 1}, - {5, 10, 2, 1}, + {5, 10, -1, 0, true}, + {5, 10, 0, 1, false}, + {5, 10, 1, 1, false}, + {5, 10, 2, 0, true}, + {5, 10, 2, 0, true}, - {5, 5, 1, 1}, - {5, 5, 2, 1}, - {5, 5, 3, 1}, + {5, 5, 1, 1, false}, + {5, 5, 2, 0, true}, + {5, 5, 3, 0, true}, - {5, 3, 2, 2}, - {5, 3, 3, 2}, + {5, 3, 2, 2, false}, + {5, 3, 3, 0, true}, - {5, 2, 2, 2}, - {5, 2, 3, 3}, - {5, 2, 4, 3}, + {5, 2, 2, 2, false}, + {5, 2, 3, 3, false}, + {5, 2, 4, 0, true}, } for _, c := range cases { - p := validatePage(c.page, c.perPage, c.totalCount) + p, err := validatePage(c.page, c.perPage, c.totalCount) + if c.expErr { + assert.Error(t, err) + continue + } + assert.Equal(t, c.newPage, p, fmt.Sprintf("%v", c)) } diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 4e292de2f..d44ce257a 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -5,7 +5,7 @@ import ( ) // TODO: better system than "unsafe" prefix -// NOTE: Amino is registered in rpc/core/types/wire.go. +// NOTE: Amino is registered in rpc/core/types/codec.go. var Routes = map[string]*rpc.RPCFunc{ // subscribe/unsubscribe are reserved for websocket events. 
"subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), @@ -31,7 +31,7 @@ var Routes = map[string]*rpc.RPCFunc{ "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), - // broadcast API + // tx broadcast API "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), "broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"), "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), @@ -39,6 +39,9 @@ var Routes = map[string]*rpc.RPCFunc{ // abci API "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), + + // evidence API + "broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"), } func AddUnsafeRoutes() { diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 9c80a915a..ab84c6a57 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -198,7 +198,10 @@ func TxSearch(ctx *rpctypes.Context, query string, prove bool, page, perPage int totalCount := len(results) perPage = validatePerPage(perPage) - page = validatePage(page, perPage, totalCount) + page, err = validatePage(page, perPage, totalCount) + if err != nil { + return nil, err + } skipCount := validateSkipCount(page, perPage) apiResults := make([]*ctypes.ResultTx, cmn.MinInt(perPage, totalCount-skipCount)) diff --git a/rpc/core/types/wire.go b/rpc/core/types/codec.go similarity index 100% rename from rpc/core/types/wire.go rename to rpc/core/types/codec.go diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 075fa957a..7619a899a 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -161,6 +161,13 @@ type ResultBroadcastTxCommit struct { Height int64 `json:"height"` } +type ResultBroadcastTxCommitDeprecated struct { + CheckTx abci.ResponseCheckTxDeprecated `json:"check_tx"` + DeliverTx abci.ResponseDeliverTxDeprecated `json:"deliver_tx"` + Hash cmn.HexBytes `json:"hash"` + Height int64 `json:"height"` +} + // Result of querying for a tx type ResultTx struct { Hash cmn.HexBytes `json:"hash"` @@ -171,6 +178,15 @@ type ResultTx struct { Proof types.TxProof `json:"proof,omitempty"` } +type ResultTxDeprecated struct { + Hash cmn.HexBytes `json:"hash"` + Height int64 `json:"height"` + Index uint32 `json:"index"` + TxResult abci.ResponseDeliverTxDeprecated `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` +} + // Result of searching for txs type ResultTxSearch struct { Txs []*ResultTx `json:"txs"` @@ -195,6 +211,11 @@ type ResultABCIQuery struct { Response abci.ResponseQuery `json:"response"` } +// Result of broadcasting evidence +type ResultBroadcastEvidence struct { + Hash []byte `json:"hash"` +} + // empty results type ( ResultUnsafeFlushMempool struct{} @@ -206,7 +227,7 @@ type ( // Event data from a subscription type ResultEvent struct { - Query string `json:"query"` - Data types.TMEventData `json:"data"` - Tags map[string]string `json:"tags"` + Query string `json:"query"` + Data types.TMEventData `json:"data"` + Events map[string][]string `json:"events"` } diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 922016dd5..d02120e10 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -2,8 +2,8 @@ package core_grpc import ( "net" - "time" + "golang.org/x/net/context" "google.golang.org/grpc" cmn "github.com/tendermint/tendermint/libs/common" @@ -26,13 +26,13 @@ func StartGRPCServer(ln net.Listener) error { // StartGRPCClient dials the gRPC server using protoAddr and returns a new // 
BroadcastAPIClient. func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc)) + conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) if err != nil { panic(err) } return NewBroadcastAPIClient(conn) } -func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) { +func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return cmn.Connect(addr) } diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index 0d4df49a9..db57c536e 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -10,10 +10,13 @@ import ( "net/url" "reflect" "strings" + "sync" "github.com/pkg/errors" + amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" types "github.com/tendermint/tendermint/rpc/lib/types" ) @@ -39,12 +42,13 @@ func makeHTTPDialer(remoteAddr string) (string, string, func(string, string) (ne parts := strings.SplitN(remoteAddr, "://", 2) var protocol, address string - if len(parts) == 1 { + switch { + case len(parts) == 1: // default to tcp if nothing specified protocol, address = protoTCP, remoteAddr - } else if len(parts) == 2 { + case len(parts) == 2: protocol, address = parts[0], parts[1] - } else { + default: // return a invalid message msg := fmt.Sprintf("Invalid addr: %s", remoteAddr) return clientProtocol, msg, func(_ string, _ string) (net.Conn, error) { @@ -83,25 +87,56 @@ func makeHTTPClient(remoteAddr string) (string, *http.Client) { //------------------------------------------------------------------------------------ +// jsonRPCBufferedRequest encapsulates a single buffered request, as well as its +// anticipated response structure. +type jsonRPCBufferedRequest struct { + request types.RPCRequest + result interface{} // The result will be deserialized into this object. +} + +// JSONRPCRequestBatch allows us to buffer multiple request/response structures +// into a single batch request. Note that this batch acts like a FIFO queue, and +// is thread-safe. +type JSONRPCRequestBatch struct { + client *JSONRPCClient + + mtx sync.Mutex + requests []*jsonRPCBufferedRequest +} + // JSONRPCClient takes params as a slice type JSONRPCClient struct { address string client *http.Client + id types.JSONRPCStringID cdc *amino.Codec } +// JSONRPCCaller implementers can facilitate calling the JSON RPC endpoint. +type JSONRPCCaller interface { + Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) +} + +// Both JSONRPCClient and JSONRPCRequestBatch can facilitate calls to the JSON +// RPC endpoint. +var _ JSONRPCCaller = (*JSONRPCClient)(nil) +var _ JSONRPCCaller = (*JSONRPCRequestBatch)(nil) + // NewJSONRPCClient returns a JSONRPCClient pointed at the given address. func NewJSONRPCClient(remote string) *JSONRPCClient { address, client := makeHTTPClient(remote) return &JSONRPCClient{ address: address, client: client, + id: types.JSONRPCStringID("jsonrpc-client-" + cmn.RandStr(8)), cdc: amino.NewCodec(), } } +// Call will send the request for the given method through to the RPC endpoint +// immediately, without buffering of requests. 
func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - request, err := types.MapToRequest(c.cdc, types.JSONRPCStringID("jsonrpc-client"), method, params) + request, err := types.MapToRequest(c.cdc, c.id, method, params) if err != nil { return nil, err } @@ -109,9 +144,7 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul if err != nil { return nil, err } - // log.Info(string(requestBytes)) requestBuf := bytes.NewBuffer(requestBytes) - // log.Info(Fmt("RPC request to %v (%v): %v", c.remote, method, string(requestBytes))) httpResponse, err := c.client.Post(c.address, "text/json", requestBuf) if err != nil { return nil, err @@ -122,8 +155,40 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul if err != nil { return nil, err } - // log.Info(Fmt("RPC response: %v", string(responseBytes))) - return unmarshalResponseBytes(c.cdc, responseBytes, result) + return unmarshalResponseBytes(c.cdc, responseBytes, c.id, result) +} + +// NewRequestBatch starts a batch of requests for this client. +func (c *JSONRPCClient) NewRequestBatch() *JSONRPCRequestBatch { + return &JSONRPCRequestBatch{ + requests: make([]*jsonRPCBufferedRequest, 0), + client: c, + } +} + +func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interface{}, error) { + reqs := make([]types.RPCRequest, 0, len(requests)) + results := make([]interface{}, 0, len(requests)) + for _, req := range requests { + reqs = append(reqs, req.request) + results = append(results, req.result) + } + // serialize the array of requests into a single JSON object + requestBytes, err := json.Marshal(reqs) + if err != nil { + return nil, err + } + httpResponse, err := c.client.Post(c.address, "text/json", bytes.NewBuffer(requestBytes)) + if err != nil { + return nil, err + } + defer httpResponse.Body.Close() // nolint: errcheck + + responseBytes, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return nil, err + } + return unmarshalResponseBytesArray(c.cdc, responseBytes, c.id, results) } func (c *JSONRPCClient) Codec() *amino.Codec { @@ -136,6 +201,57 @@ func (c *JSONRPCClient) SetCodec(cdc *amino.Codec) { //------------------------------------------------------------- +// Count returns the number of enqueued requests waiting to be sent. +func (b *JSONRPCRequestBatch) Count() int { + b.mtx.Lock() + defer b.mtx.Unlock() + return len(b.requests) +} + +func (b *JSONRPCRequestBatch) enqueue(req *jsonRPCBufferedRequest) { + b.mtx.Lock() + defer b.mtx.Unlock() + b.requests = append(b.requests, req) +} + +// Clear empties out the request batch. +func (b *JSONRPCRequestBatch) Clear() int { + b.mtx.Lock() + defer b.mtx.Unlock() + return b.clear() +} + +func (b *JSONRPCRequestBatch) clear() int { + count := len(b.requests) + b.requests = make([]*jsonRPCBufferedRequest, 0) + return count +} + +// Send will attempt to send the current batch of enqueued requests, and then +// will clear out the requests once done. On success, this returns the +// deserialized list of results from each of the enqueued requests. +func (b *JSONRPCRequestBatch) Send() ([]interface{}, error) { + b.mtx.Lock() + defer func() { + b.clear() + b.mtx.Unlock() + }() + return b.client.sendBatch(b.requests) +} + +// Call enqueues a request to call the given RPC method with the specified +// parameters, in the same way that the `JSONRPCClient.Call` function would. 
+func (b *JSONRPCRequestBatch) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { + request, err := types.MapToRequest(b.client.cdc, b.client.id, method, params) + if err != nil { + return nil, err + } + b.enqueue(&jsonRPCBufferedRequest{request: request, result: result}) + return result, nil +} + +//------------------------------------------------------------- + // URI takes params as a map type URIClient struct { address string @@ -168,7 +284,7 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in if err != nil { return nil, err } - return unmarshalResponseBytes(c.cdc, responseBytes, result) + return unmarshalResponseBytes(c.cdc, responseBytes, "", result) } func (c *URIClient) Codec() *amino.Codec { @@ -181,7 +297,7 @@ func (c *URIClient) SetCodec(cdc *amino.Codec) { //------------------------------------------------ -func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result interface{}) (interface{}, error) { +func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCStringID, result interface{}) (interface{}, error) { // Read response. If rpc/core/types is imported, the result will unmarshal // into the correct type. // log.Notice("response", "response", string(responseBytes)) @@ -189,19 +305,71 @@ func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result inter response := &types.RPCResponse{} err = json.Unmarshal(responseBytes, response) if err != nil { - return nil, errors.Errorf("Error unmarshalling rpc response %s, error %v", string(responseBytes), err) + return nil, errors.Wrap(err, "error unmarshalling rpc response") } if response.Error != nil { - return nil, errors.Errorf("Response error: %v", response.Error) + return nil, errors.Wrap(response.Error, "response error") + } + // From the JSON-RPC 2.0 spec: + // id: It MUST be the same as the value of the id member in the Request Object. + if err := validateResponseID(response, expectedID); err != nil { + return nil, err } // Unmarshal the RawMessage into the result. err = cdc.UnmarshalJSON(response.Result, result) if err != nil { - return nil, errors.Errorf("Error unmarshalling rpc response result %s, error %v", string(response.Result), err) + return nil, errors.Wrap(err, "error unmarshalling rpc response result") } return result, nil } +func unmarshalResponseBytesArray(cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCStringID, results []interface{}) ([]interface{}, error) { + var ( + err error + responses []types.RPCResponse + ) + err = json.Unmarshal(responseBytes, &responses) + if err != nil { + return nil, errors.Wrap(err, "error unmarshalling rpc response") + } + // No response error checking here as there may be a mixture of successful + // and unsuccessful responses. + + if len(results) != len(responses) { + return nil, errors.Errorf("expected %d result objects into which to inject responses, but got %d", len(responses), len(results)) + } + + for i, response := range responses { + // From the JSON-RPC 2.0 spec: + // id: It MUST be the same as the value of the id member in the Request Object. 
+ if err := validateResponseID(&response, expectedID); err != nil { + return nil, errors.Wrapf(err, "failed to validate response ID in response %d", i) + } + if err := cdc.UnmarshalJSON(responses[i].Result, results[i]); err != nil { + return nil, errors.Wrap(err, "error unmarshalling rpc response result") + } + } + return results, nil +} + +func validateResponseID(res *types.RPCResponse, expectedID types.JSONRPCStringID) error { + // we only validate a response ID if the expected ID is non-empty + if len(expectedID) == 0 { + return nil + } + if res.ID == nil { + return errors.Errorf("missing ID in response") + } + id, ok := res.ID.(types.JSONRPCStringID) + if !ok { + return errors.Errorf("expected ID string in response but got: %v", id) + } + if expectedID != id { + return errors.Errorf("response ID (%s) does not match request ID (%s)", id, expectedID) + } + return nil +} + func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, error) { values := make(url.Values) if len(args) == 0 { diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index e3b559569..05180c753 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -369,10 +369,11 @@ func (c *WSClient) writeRoutine() { defer func() { ticker.Stop() - if err := c.conn.Close(); err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - } + c.conn.Close() + // err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + // } c.wg.Done() }() @@ -421,10 +422,11 @@ func (c *WSClient) writeRoutine() { // executing all reads from this goroutine. func (c *WSClient) readRoutine() { defer func() { - if err := c.conn.Close(); err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - } + c.conn.Close() + // err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + // } c.wg.Done() }() diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index e902fe21a..4f2cc9ada 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -212,7 +212,8 @@ func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { select { case resp := <-c.ResponsesCh: if resp.Error != nil { - t.Fatalf("unexpected error: %v", resp.Error) + t.Errorf("unexpected error: %v", resp.Error) + return } if resp.Result != nil { wg.Done() diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index e0a79dd26..975b00d57 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -104,7 +104,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(types.JSONRPCStringID(""), errors.Wrap(err, "Error reading request body"))) + WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(types.JSONRPCStringID(""), errors.Wrap(err, "error reading request body"))) return } // if its an empty request (like from a browser), @@ -114,49 +114,59 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo return } - var request types.RPCRequest - err = json.Unmarshal(b, &request) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "Error unmarshalling 
request"))) - return - } - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == types.JSONRPCStringID("") { - logger.Debug("HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)") - return - } - if len(r.URL.Path) > 1 { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(request.ID, errors.Errorf("Path %s is invalid", r.URL.Path))) - return - } - - rpcFunc := funcMap[request.Method] - if rpcFunc == nil || rpcFunc.ws { - WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(request.ID)) - return + // first try to unmarshal the incoming request as an array of RPC requests + var ( + requests []types.RPCRequest + responses []types.RPCResponse + ) + if err := json.Unmarshal(b, &requests); err != nil { + // next, try to unmarshal as a single request + var request types.RPCRequest + if err := json.Unmarshal(b, &request); err != nil { + WriteRPCResponseHTTP(w, types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshalling request"))) + return + } + requests = []types.RPCRequest{request} } - ctx := &types.Context{JSONReq: &request, HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) + for _, request := range requests { + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == types.JSONRPCStringID("") { + logger.Debug("HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)") + continue + } + if len(r.URL.Path) > 1 { + responses = append(responses, types.RPCInvalidRequestError(request.ID, errors.Errorf("path %s is invalid", r.URL.Path))) + continue + } + rpcFunc, ok := funcMap[request.Method] + if !ok || rpcFunc.ws { + responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + continue + } + ctx := &types.Context{JSONReq: &request, HTTPReq: r} + args := []reflect.Value{reflect.ValueOf(ctx)} + if len(request.Params) > 0 { + fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) + if err != nil { + responses = append(responses, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) + continue + } + args = append(args, fnArgs...) + } + returns := rpcFunc.f.Call(args) + logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) + result, err := unreflectResult(returns) if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) - return + responses = append(responses, types.RPCInternalError(request.ID, err)) + continue } - args = append(args, fnArgs...) + responses = append(responses, types.NewRPCSuccessResponse(cdc, request.ID, result)) } - - returns := rpcFunc.f.Call(args) - - logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) - result, err := unreflectResult(returns) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInternalError(request.ID, err)) - return + if len(responses) > 0 { + WriteRPCResponseArrayHTTP(w, responses) } - WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, request.ID, result)) } } @@ -195,7 +205,7 @@ func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json. 
func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) { if len(rpcFunc.argNames) != len(params) { - return nil, errors.Errorf("Expected %v parameters (%v), got %v (%v)", + return nil, errors.Errorf("expected %v parameters (%v), got %v (%v)", len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) } @@ -237,7 +247,7 @@ func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte) ([]reflect } // Otherwise, bad format, we cannot parse - return nil, errors.Errorf("Unknown type for JSON params: %v. Expected map or array", err) + return nil, errors.Errorf("unknown type for JSON params: %v. Expected map or array", err) } // rpc.json @@ -262,7 +272,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func fnArgs, err := httpParamsToArgs(rpcFunc, cdc, r) if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "Error converting http params to arguments"))) + WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "error converting http params to arguments"))) return } args = append(args, fnArgs...) @@ -330,13 +340,14 @@ func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Val func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error, bool) { if rt.Kind() == reflect.Ptr { rv_, err, ok := nonJSONStringToArg(cdc, rt.Elem(), arg) - if err != nil { + switch { + case err != nil: return reflect.Value{}, err, false - } else if ok { + case ok: rv := reflect.New(rt.Elem()) rv.Elem().Set(rv_) return rv, nil, true - } else { + default: return reflect.Value{}, nil, false } } else { @@ -373,7 +384,7 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect if isHexString { if !expectingString && !expectingByteSlice { - err := errors.Errorf("Got a hex string arg, but expected '%s'", + err := errors.Errorf("got a hex string arg, but expected '%s'", rt.Kind().String()) return reflect.ValueOf(nil), err, false } @@ -439,6 +450,9 @@ type wsConnection struct { // Send pings to server with this period. Must be less than readWait, but greater than zero. pingPeriod time.Duration + // Maximum message size. + readLimit int64 + // callback which is called upon disconnect onDisconnect func(remoteAddr string) @@ -460,7 +474,6 @@ func NewWSConnection( cdc *amino.Codec, options ...func(*wsConnection), ) *wsConnection { - baseConn.SetReadLimit(maxBodyBytes) wsc := &wsConnection{ remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, @@ -474,6 +487,7 @@ func NewWSConnection( for _, option := range options { option(wsc) } + wsc.baseConn.SetReadLimit(wsc.readLimit) wsc.BaseService = *cmn.NewBaseService(nil, "wsConnection", wsc) return wsc } @@ -524,6 +538,14 @@ func PingPeriod(pingPeriod time.Duration) func(*wsConnection) { } } +// ReadLimit sets the maximum size for reading message. +// It should only be used in the constructor - not Goroutine-safe. +func ReadLimit(readLimit int64) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.readLimit = readLimit + } +} + // OnStart implements cmn.Service by starting the read and write routines. It // blocks until the connection closes. 
func (wsc *wsConnection) OnStart() error { @@ -653,7 +675,7 @@ func (wsc *wsConnection) processRequest(message []byte) { var request types.RPCRequest err := json.Unmarshal(message, &request) if err != nil { - wsc.WriteRPCResponse(types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "Error unmarshaling request"))) + wsc.WriteRPCResponse(types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshaling request"))) return } @@ -677,7 +699,7 @@ func (wsc *wsConnection) processRequest(message []byte) { if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, wsc.cdc, request.Params) if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) + wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) return } args = append(args, fnArgs...) @@ -734,12 +756,10 @@ func (wsc *wsConnection) writeRoutine() { jsonBytes, err := json.MarshalIndent(msg, "", " ") if err != nil { wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) - } else { - if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { - wsc.Logger.Error("Failed to write response", "err", err) - wsc.Stop() - return - } + } else if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { + wsc.Logger.Error("Failed to write response", "err", err) + wsc.Stop() + return } case <-wsc.Quit(): return diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index f8ad06107..9cded2953 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -154,6 +154,72 @@ func TestRPCNotification(t *testing.T) { require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") } +func TestRPCNotificationInBatch(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + expectCount int + }{ + { + `[ + {"jsonrpc": "2.0","id": ""}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} + ]`, + 1, + }, + { + `[ + {"jsonrpc": "2.0","id": ""}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]}, + {"jsonrpc": "2.0","id": ""}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} + ]`, + 2, + }, + } + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + // Always expecting back a JSONRPCResponse + assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + blob, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + + var responses []types.RPCResponse + // try to unmarshal an array first + err = json.Unmarshal(blob, &responses) + if err != nil { + // if we were actually expecting an array, but got an error + if tt.expectCount > 1 { + t.Errorf("#%d: expected an array, couldn't unmarshal it\nblob: %s", i, blob) + continue + } else { + // we were expecting an error here, so let's unmarshal a single response + var response types.RPCResponse + err = json.Unmarshal(blob, &response) + if err != nil { + t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) + continue + } + // have a single-element result + responses = []types.RPCResponse{response} + } + } + if tt.expectCount != len(responses) { + t.Errorf("#%d: expected %d response(s), but got %d\nblob: %s", 
i, tt.expectCount, len(responses), blob) + continue + } + for _, response := range responses { + assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + } + } +} + func TestUnknownRPCPath(t *testing.T) { mux := testMux() req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) diff --git a/rpc/lib/server/http_params.go b/rpc/lib/server/http_params.go index 3c948c0ba..8ade41c79 100644 --- a/rpc/lib/server/http_params.go +++ b/rpc/lib/server/http_params.go @@ -76,7 +76,7 @@ func GetParamUint(r *http.Request, param string) (uint, error) { func GetParamRegexp(r *http.Request, param string, re *regexp.Regexp) (string, error) { s := GetParam(r, param) if !re.MatchString(s) { - return "", errors.Errorf(param, "Did not match regular expression %v", re.String()) + return "", errors.Errorf(param, "did not match regular expression %v", re.String()) } return s, nil } diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index c4bb6fa17..c97739bd2 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -26,6 +26,11 @@ type Config struct { ReadTimeout time.Duration // mirrors http.Server#WriteTimeout WriteTimeout time.Duration + // MaxBodyBytes controls the maximum number of bytes the + // server will read parsing the request body. + MaxBodyBytes int64 + // mirrors http.Server#MaxHeaderBytes + MaxHeaderBytes int } // DefaultConfig returns a default configuration. @@ -34,28 +39,21 @@ func DefaultConfig() *Config { MaxOpenConnections: 0, // unlimited ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, + MaxBodyBytes: int64(1000000), // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default } } -const ( - // maxBodyBytes controls the maximum number of bytes the - // server will read parsing the request body. - maxBodyBytes = int64(1000000) // 1MB - - // same as the net/http default - maxHeaderBytes = 1 << 20 -) - // StartHTTPServer takes a listener and starts an HTTP server with the given handler. // It wraps handler with RecoverAndLogHandler. // NOTE: This function blocks - you may want to call it in a go-routine. 
func StartHTTPServer(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr())) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), ReadTimeout: config.ReadTimeout, WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: maxHeaderBytes, + MaxHeaderBytes: config.MaxHeaderBytes, } err := s.Serve(listener) logger.Info("RPC HTTP server stopped", "err", err) @@ -75,10 +73,10 @@ func StartHTTPAndTLSServer( logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listener.Addr(), certFile, keyFile)) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), ReadTimeout: config.ReadTimeout, WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: maxHeaderBytes, + MaxHeaderBytes: config.MaxHeaderBytes, } err := s.ServeTLS(listener, certFile, keyFile) @@ -98,7 +96,9 @@ func WriteRPCResponseHTTPError( w.Header().Set("Content-Type", "application/json") w.WriteHeader(httpCode) - w.Write(jsonBytes) // nolint: errcheck, gas + if _, err := w.Write(jsonBytes); err != nil { + panic(err) + } } func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { @@ -108,12 +108,33 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { } w.Header().Set("Content-Type", "application/json") w.WriteHeader(200) - w.Write(jsonBytes) // nolint: errcheck, gas + if _, err := w.Write(jsonBytes); err != nil { + panic(err) + } +} + +// WriteRPCResponseArrayHTTP will do the same as WriteRPCResponseHTTP, except it +// can write arrays of responses for batched request/response interactions via +// the JSON RPC. +func WriteRPCResponseArrayHTTP(w http.ResponseWriter, res []types.RPCResponse) { + if len(res) == 1 { + WriteRPCResponseHTTP(w, res[0]) + } else { + jsonBytes, err := json.MarshalIndent(res, "", " ") + if err != nil { + panic(err) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + if _, err := w.Write(jsonBytes); err != nil { + panic(err) + } + } } //----------------------------------------------------------------------------- -// Wraps an HTTP handler, adding error logging. +// RecoverAndLogHandler wraps an HTTP handler, adding error logging. // If the inner function panics, the outer function recovers, logs, sends an // HTTP 500 error response. 
func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { @@ -191,14 +212,14 @@ func Listen(addr string, config *Config) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { return nil, errors.Errorf( - "Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", + "invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", addr, ) } proto, addr := parts[0], parts[1] listener, err = net.Listen(proto, addr) if err != nil { - return nil, errors.Errorf("Failed to listen on %v: %v", addr, err) + return nil, errors.Errorf("failed to listen on %v: %v", addr, err) } if config.MaxOpenConnections > 0 { listener = netutil.LimitListener(listener, config.MaxOpenConnections) diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go index 7f47a30b3..b463aa6a8 100644 --- a/rpc/lib/server/http_server_test.go +++ b/rpc/lib/server/http_server_test.go @@ -1,16 +1,18 @@ package rpcserver import ( + "crypto/tls" "fmt" "io" "io/ioutil" + "net" "net/http" - "os" "sync" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" @@ -66,18 +68,27 @@ func TestMaxOpenConnections(t *testing.T) { } func TestStartHTTPAndTLSServer(t *testing.T) { - config := DefaultConfig() - config.MaxOpenConnections = 1 - // set up fixtures - listenerAddr := "tcp://0.0.0.0:0" - listener, err := Listen(listenerAddr, config) + ln, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) + defer ln.Close() + mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {}) + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, "some body") + }) - // test failure - err = StartHTTPAndTLSServer(listener, mux, "", "", log.TestingLogger(), config) - require.IsType(t, (*os.PathError)(nil), err) + go StartHTTPAndTLSServer(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()) - // TODO: test that starting the server can actually work + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint: gosec + } + c := &http.Client{Transport: tr} + res, err := c.Get("https://" + ln.Addr().String()) + require.NoError(t, err) + defer res.Body.Close() + assert.Equal(t, http.StatusOK, res.StatusCode) + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + assert.Equal(t, []byte("some body"), body) } diff --git a/rpc/lib/server/test.crt b/rpc/lib/server/test.crt new file mode 100644 index 000000000..e4ab1965d --- /dev/null +++ b/rpc/lib/server/test.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEODCCAiCgAwIBAgIQWDHUrd4tOM2xExWhzOEJ7DANBgkqhkiG9w0BAQsFADAZ +MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy +MDIxMTAyMDRaMBExDzANBgNVBAMTBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANBaa6dc9GZcIhAHWqVrx0LONYf+IlbvTP7yrV45ws0ix8TX +1NUOiDY1cwzKH8ay/HYX45e2fFLrtLidc9h+apsC55k3Vdcy00+Ksr/adjR8D4A/ +GpnTS+hVDHTlqINe9a7USok34Zr1rc3fh4Imu5RxEurjMwkA/36k6+OpXMp2qlKY +S1fGqwn2KGhXkp/yTWZILEMXBazNxGx4xfqYXzWm6boeyJAXpM2DNkv7dtwa/CWY +WacUQJApNInwn5+B8LLoo+pappkfZOjAD9/aHKsyFTSWmmWeg7V//ouB3u5vItqf +GP+3xmPgeYeEyOIe/P2f8bRuQs+GGwSCmi6F1GUCAwEAAaOBgzCBgDAOBgNVHQ8B +Af8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQW +BBSpBFIMbkBR4xVYQZtUJQQwzPmbHjAfBgNVHSMEGDAWgBTUkz3u+N2iMe6yKb5+ 
+R1d4CeM9YTAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4ICAQBCqdzS +tPHkMYWjYs6aREwob9whjyG8a4Qp6IkP1SYHCwpzsTeWLi9ybEcDRb3jZ4iRxbZg +7GFxjqHoWgBZHAIyICMsHupOJEtXq5hx86NuMwk/12bx1eNj0yTIAnVOA+em/ZtB +zR38OwB8xXmjKd0Ow1Y7zCh5zE2gU+sR0JOJSfxXUZrJvwDNrbcmZPQ+kwuq4cyv +fxZnvZf/owbyOLQFdbiPQbbiZ7JSv8q7GCMleULCEygrsWClYkULUByhKykCHJIU +wfq1owge9EqG/4CDCCjB9vBFmUyv3FJhgWnzd6tPQckFoHSoD0Bjsv/pQFcsGLcg ++e/Mm6hZgCXXwI2WHYbxqz5ToOaRQQYo6N77jWejOBMecOZmPDyQ2nz73aJd11GW +NiDT7pyMlBJA8W4wAvVP4ow2ugqsPjqZ6EyismIGFUTqMp+NtXOsLPK+sEMhKhJ9 +ulczRpPEf25roBt6aEk2fTAfAPmbpvNamBLSbBU23mzJ38RmfhxLOlOgCGbBBX4d +kE+/+En8UJO4X8CKaKRo/c5G2UZ6++2cjp6SPrsGENDMW5yBGegrDw+ow8/bLxIr +OjWpSe2cygovy3aHE6UBOgkxw9KIaSEqFgjQZ0i+xO6l6qQoljQgUGXfecVMR+7C +4KsyVVTMlK9/thA7Zfc8a5z8ZCtIKkT52XsJhw== +-----END CERTIFICATE----- diff --git a/rpc/lib/server/test.key b/rpc/lib/server/test.key new file mode 100644 index 000000000..bb9af06b0 --- /dev/null +++ b/rpc/lib/server/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEoQIBAAKCAQEA0Fprp1z0ZlwiEAdapWvHQs41h/4iVu9M/vKtXjnCzSLHxNfU +1Q6INjVzDMofxrL8dhfjl7Z8Uuu0uJ1z2H5qmwLnmTdV1zLTT4qyv9p2NHwPgD8a +mdNL6FUMdOWog171rtRKiTfhmvWtzd+Hgia7lHES6uMzCQD/fqTr46lcynaqUphL +V8arCfYoaFeSn/JNZkgsQxcFrM3EbHjF+phfNabpuh7IkBekzYM2S/t23Br8JZhZ +pxRAkCk0ifCfn4Hwsuij6lqmmR9k6MAP39ocqzIVNJaaZZ6DtX/+i4He7m8i2p8Y +/7fGY+B5h4TI4h78/Z/xtG5Cz4YbBIKaLoXUZQIDAQABAoH/NodzpVmunRt/zrIe +By0t+U3+tJjOY/I9NHxO41o6oXV40wupqBkljQpwEejUaCxv5nhaGFqqLwmBQs/y +gbaUL/2Sn4bb8HZc13R1U8DZLuNJK0dYrumd9DBOEkoI0FkJ87ebyk3VvbiOxFK8 +JFP+w9rUGKVdtf2M4JhJJEwu/M2Yawx9/8CrCIY2G6ufaylrIysLeQMsxrogF8n4 +hq7fyqveWRzxhqUxS2fp9Ynpx4jnd1lMzv+z3i8eEsW+gB9yke7UkXZMbtZg1xfB +JjiEfcDVfSwSihhgOYttgQ9hkIdohDUak7OzRSWVBuoxWUhMfrQxw/HZlgZJL9Vf +rGdlAoGBANOGmgEGky+acV33WTWGV5OdAw6B/SlBEoORJbj6UzQiUz3hFH/Tgpbj +JOKHWGbGd8OtOYbt9JoofGlNgHA/4nAEYAc2HGa+q0fBwMUflU0DudAxXis4jDmE +D76moGmyJoSgwVrp1W/vwNixA5RpcZ3Wst2nf9RKLr+DxypHTit/AoGBAPwpDeqc +rwXOTl0KR/080Nc11Z03VIVZAGfA59J73HmADF9bBVlmReQdkwX0lERchdzD0lfa +XqbqBLr4FS5Uqyn5f3DCaMnOeKfvtGw2z6LnY+w03mii4PEW/vNKLlB18NdduPwL +KeAc08Zh+qJFMKD1PoEQOH+Y7NybBbaQL8IbAoGAfPPUYaq6o7I+Kd4FysKTVVW5 +CobrP8V65FGH0R++qttkBPfDHkeZqvx/O3nsVLoE4YigpP5IMhCcfbAUoTp7zuQm +vdvPJzqW/4qLD2c60QXUbBHdqPZ8jzVd/6d6tzVP36T+02+yb69XYiofDTrErRK5 +EorxzjwMJYH40xbQLI0CgYBh7d/FucwPSSwN3ixPIQtKSVIImLBuiT4rDTP6/reF +SEGF1ueg7KNAEGxE59OdKQGj1zkdfWU9Fa14n1g6gg9nYcoolJf1qAYb0nAThsXk +0lBwL6ggowERIIkrGygZf3Rlb7SjzgIZU5i7dtnLo2tbV2NK5G3MwCtdEaeKWzzw ++QKBgQC7+JPHoqbnNgis2vCGLKMOU3HpJK/rYEU/8ZUegc9lshEFZYsRbtKQQJQs +nqsChrG8UoK84frujEBkO/Nzsil85p8ar79wZguGnVvswTWaTuKvl8H/qQQ/JSHZ +OHGQD4qwTCkdRr8Vf8NfuCoZlJDnHncLJZNWjrb5feqCnJ/YIQ== +-----END RSA PRIVATE KEY----- diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index dc5c4a41d..56617acf4 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -8,9 +8,8 @@ import ( "strings" "time" - "github.com/tendermint/tendermint/libs/log" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" @@ -23,7 +22,18 @@ import ( rpcclient "github.com/tendermint/tendermint/rpc/lib/client" ) +// Options helps with specifying some parameters for our RPC testing for greater +// control. 
+type Options struct { + suppressStdout bool + recreateConfig bool +} + var globalConfig *cfg.Config +var defaultOptions = Options{ + suppressStdout: false, + recreateConfig: false, +} func waitForRPC() { laddr := GetConfig().RPC.ListenAddress @@ -77,22 +87,28 @@ func makeAddrs() (string, string, string) { fmt.Sprintf("tcp://0.0.0.0:%d", randPort()) } +func createConfig() *cfg.Config { + pathname := makePathname() + c := cfg.ResetTestRoot(pathname) + + // and we use random ports to run in parallel + tm, rpc, grpc := makeAddrs() + c.P2P.ListenAddress = tm + c.RPC.ListenAddress = rpc + c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} + c.RPC.GRPCListenAddress = grpc + c.TxIndex.IndexTags = "creator,tx.height" // see kvstore application + c.RPC.WebsocketPoolMaxSize = 1 + c.RPC.WebsocketPoolQueueSize = 1 + c.RPC.WebsocketPoolSpawnSize = 1 + return c +} + // GetConfig returns a config for the test cases as a singleton -func GetConfig() *cfg.Config { - if globalConfig == nil { - pathname := makePathname() - globalConfig = cfg.ResetTestRoot(pathname) - - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - globalConfig.P2P.ListenAddress = tm - globalConfig.RPC.ListenAddress = rpc - globalConfig.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - globalConfig.RPC.GRPCListenAddress = grpc - globalConfig.RPC.WebsocketPoolMaxSize = 1 - globalConfig.RPC.WebsocketPoolQueueSize = 1 - globalConfig.RPC.WebsocketPoolSpawnSize = 1 - globalConfig.TxIndex.IndexTags = "app.creator,tx.height" // see kvstore application + +func GetConfig(forceCreate ...bool) *cfg.Config { + if globalConfig == nil || (len(forceCreate) > 0 && forceCreate[0]) { + globalConfig = createConfig() } return globalConfig } @@ -103,8 +119,12 @@ func GetGRPCClient() core_grpc.BroadcastAPIClient { } // StartTendermint starts a test tendermint server in a go routine and returns when it is initialized -func StartTendermint(app abci.Application) *nm.Node { - node := NewTendermint(app) +func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node { + nodeOpts := defaultOptions + for _, opt := range opts { + opt(&nodeOpts) + } + node := NewTendermint(app, &nodeOpts) err := node.Start() if err != nil { panic(err) @@ -114,7 +134,9 @@ func StartTendermint(app abci.Application) *nm.Node { waitForRPC() waitForGRPC() - fmt.Println("Tendermint running!") + if !nodeOpts.suppressStdout { + fmt.Println("Tendermint running!") + } return node } @@ -128,13 +150,17 @@ func StopTendermint(node *nm.Node) { } // NewTendermint creates a new tendermint server and sleeps forever -func NewTendermint(app abci.Application) *nm.Node { +func NewTendermint(app abci.Application, opts *Options) *nm.Node { // Create & start node - config := GetConfig() - // change default config for test + config := GetConfig(opts.recreateConfig) config.TxIndex.EnableRangeQuery = true - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - logger = log.NewFilter(logger, log.AllowError()) + var logger log.Logger + if opts.suppressStdout { + logger = log.NewNopLogger() + } else { + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewFilter(logger, log.AllowError()) + } pvKeyFile := config.PrivValidatorKeyFile() pvKeyStateFile := config.PrivValidatorStateFile() pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) @@ -153,3 +179,15 @@ func NewTendermint(app abci.Application) *nm.Node { } return node } + +// SuppressStdout is an option that tries to make sure the RPC test Tendermint +// node doesn't log 
anything to stdout. +func SuppressStdout(o *Options) { + o.suppressStdout = true +} + +// RecreateConfig instructs the RPC test to recreate the configuration each +// time, instead of treating it as a global singleton. +func RecreateConfig(o *Options) { + o.recreateConfig = true +} diff --git a/scripts/dist.sh b/scripts/dist.sh index f999c5376..ac62f1099 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -31,9 +31,6 @@ XC_EXCLUDE=${XC_EXCLUDE:-" darwin/arm solaris/amd64 solaris/386 solaris/arm free # Make sure build tools are available. make get_tools -# Get VENDORED dependencies -make get_vendor_deps - # Build! # ldflags: -s Omit the symbol table and debug information. # -w Omit the DWARF symbol table. diff --git a/scripts/get_nodejs.sh b/scripts/get_nodejs.sh new file mode 100755 index 000000000..59469cc50 --- /dev/null +++ b/scripts/get_nodejs.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +VERSION=v12.9.0 +NODE_FULL=node-${VERSION}-linux-x64 + +mkdir -p ~/.local/bin +mkdir -p ~/.local/node +wget http://nodejs.org/dist/${VERSION}/${NODE_FULL}.tar.gz -O ~/.local/node/${NODE_FULL}.tar.gz +tar -xzf ~/.local/node/${NODE_FULL}.tar.gz -C ~/.local/node/ +ln -s ~/.local/node/${NODE_FULL}/bin/node ~/.local/bin/node +ln -s ~/.local/node/${NODE_FULL}/bin/npm ~/.local/bin/npm +export PATH=~/.local/bin:$PATH +npm i -g dredd +ln -s ~/.local/node/${NODE_FULL}/bin/dredd ~/.local/bin/dredd diff --git a/scripts/get_tools.sh b/scripts/get_tools.sh index dd9566917..4dfb454bd 100755 --- a/scripts/get_tools.sh +++ b/scripts/get_tools.sh @@ -48,9 +48,6 @@ installFromGithub() { echo "" } -######################## COMMON TOOLS ######################################## -installFromGithub golang/dep 22125cfaa6ddc71e145b1535d4b7ee9744fefff2 cmd/dep - ######################## DEVELOPER TOOLS ##################################### installFromGithub gogo/protobuf 61dbc136cf5d2f08d68a011382652244990a53a9 protoc-gen-gogo @@ -67,3 +64,4 @@ installFromGithub golangci/golangci-lint 7b2421d55194c9dc385eff7720a037aa9244ca3 installFromGithub petermattis/goid b0b1615b78e5ee59739545bb38426383b2cda4c9 installFromGithub sasha-s/go-deadlock d68e2bc52ae3291765881b9056f2c1527f245f1e go get golang.org/x/tools/cmd/goimports +installFromGithub snikch/goodman 10e37e294daa3c9a90abded60ff9924bafab3888 cmd/goodman diff --git a/scripts/gitian-build.sh b/scripts/gitian-build.sh new file mode 100755 index 000000000..fbe475015 --- /dev/null +++ b/scripts/gitian-build.sh @@ -0,0 +1,201 @@ +#!/bin/bash + +# symbol prefixes: +# g_ -> global +# l_ - local variable +# f_ -> function + +set -euo pipefail + +GITIAN_CACHE_DIRNAME='.gitian-builder-cache' +GO_DEBIAN_RELEASE='1.12.8-1' +GO_TARBALL="golang-debian-${GO_DEBIAN_RELEASE}.tar.gz" +GO_TARBALL_URL="https://salsa.debian.org/go-team/compiler/golang/-/archive/debian/${GO_DEBIAN_RELEASE}/${GO_TARBALL}" + +# Defaults + +DEFAULT_SIGN_COMMAND='gpg --detach-sign' +DEFAULT_TENDERMINT_SIGS=${TENDERMINT_SIGS:-'tendermint.sigs'} +DEFAULT_GITIAN_REPO='https://github.com/devrandom/gitian-builder' +DEFAULT_GBUILD_FLAGS='' +DEFAULT_SIGS_REPO='https://github.com/tendermint/tendermint.sigs' + +# Overrides + +SIGN_COMMAND=${SIGN_COMMAND:-${DEFAULT_SIGN_COMMAND}} +GITIAN_REPO=${GITIAN_REPO:-${DEFAULT_GITIAN_REPO}} +GBUILD_FLAGS=${GBUILD_FLAGS:-${DEFAULT_GBUILD_FLAGS}} + +# Globals + +g_workdir='' +g_gitian_cache='' +g_cached_gitian='' +g_cached_go_tarball='' +g_sign_identity='' +g_sigs_dir='' +g_flag_commit='' + + +f_help() { + cat >&2 <&2 + mkdir "${l_builddir}/inputs/" + cp -v "${g_cached_go_tarball}" 
"${l_builddir}/inputs/" + done +} + +f_build() { + local l_descriptor + + l_descriptor=$1 + + bin/gbuild --commit tendermint="$g_commit" ${GBUILD_FLAGS} "$l_descriptor" + libexec/stop-target || f_echo_stderr "warning: couldn't stop target" +} + +f_sign_verify() { + local l_descriptor + + l_descriptor=$1 + + bin/gsign -p "${SIGN_COMMAND}" -s "${g_sign_identity}" --destination="${g_sigs_dir}" --release=${g_release} ${l_descriptor} + bin/gverify --destination="${g_sigs_dir}" --release="${g_release}" ${l_descriptor} +} + +f_commit_sig() { + local l_release_name + + l_release_name=$1 + + pushd "${g_sigs_dir}" + git add . || echo "git add failed" >&2 + git commit -m "Add ${l_release_name} reproducible build" || echo "git commit failed" >&2 + popd +} + +f_prep_docker_image() { + pushd $1 + bin/make-base-vm --docker --suite bionic --arch amd64 + popd +} + +f_ensure_cache() { + g_gitian_cache="${g_workdir}/${GITIAN_CACHE_DIRNAME}" + [ -d "${g_gitian_cache}" ] || mkdir "${g_gitian_cache}" + + g_cached_go_tarball="${g_gitian_cache}/${GO_TARBALL}" + if [ ! -f "${g_cached_go_tarball}" ]; then + f_echo_stderr "${g_cached_go_tarball}: cache miss, caching..." + curl -L "${GO_TARBALL_URL}" --output "${g_cached_go_tarball}" + fi + + g_cached_gitian="${g_gitian_cache}/gitian-builder" + if [ ! -d "${g_cached_gitian}" ]; then + f_echo_stderr "${g_cached_gitian}: cache miss, caching..." + git clone ${GITIAN_REPO} "${g_cached_gitian}" + fi +} + +f_demangle_platforms() { + case "${1}" in + all) + printf '%s' 'darwin linux windows' ;; + linux|darwin|windows) + printf '%s' "${1}" ;; + *) + echo "invalid platform -- ${1}" + exit 1 + esac +} + +f_echo_stderr() { + echo $@ >&2 +} + + +while getopts ":cs:h" opt; do + case "${opt}" in + h) f_help ; exit 0 ;; + c) g_flag_commit=y ;; + s) g_sign_identity="${OPTARG}" ;; + esac +done + +shift "$((OPTIND-1))" + +g_platforms=$(f_demangle_platforms "${1}") +g_workdir="$(pwd)" +g_commit="$(git rev-parse HEAD)" +g_sigs_dir=${TENDERMINT_SIGS:-"${g_workdir}/${DEFAULT_TENDERMINT_SIGS}"} + +f_ensure_cache + +f_prep_docker_image "${g_cached_gitian}" + +f_prep_build "${g_platforms}" + +export USE_DOCKER=1 +for g_os in ${g_platforms}; do + g_release="$(git describe --tags --abbrev=9 | sed 's/^v//')-${g_os}" + g_descriptor="${g_workdir}/scripts/gitian-descriptors/gitian-${g_os}.yml" + [ -f ${g_descriptor} ] + g_builddir="$(f_builddir ${g_os})" + + pushd "${g_builddir}" + f_build "${g_descriptor}" + if [ -n "${g_sign_identity}" ]; then + f_sign_verify "${g_descriptor}" + fi + popd + + if [ -n "${g_sign_identity}" -a -n "${g_flag_commit}" ]; then + [ -d "${g_sigs_dir}/.git/" ] && f_commit_sig ${g_release} || f_echo_stderr "couldn't commit, ${g_sigs_dir} is not a git clone" + fi +done + +exit 0 diff --git a/scripts/gitian-descriptors/gitian-darwin.yml b/scripts/gitian-descriptors/gitian-darwin.yml new file mode 100644 index 000000000..58b4f0cb8 --- /dev/null +++ b/scripts/gitian-descriptors/gitian-darwin.yml @@ -0,0 +1,111 @@ +--- +name: "tendermint-darwin" +enable_cache: true +distro: "ubuntu" +suites: +- "bionic" +architectures: +- "amd64" +packages: +- "bsdmainutils" +- "build-essential" +- "ca-certificates" +- "curl" +- "debhelper" +- "dpkg-dev" +- "devscripts" +- "fakeroot" +- "git" +- "golang-any" +- "xxd" +- "quilt" +remotes: +- "url": "https://github.com/tendermint/tendermint.git" + "dir": "tendermint" +files: +- "golang-debian-1.12.8-1.tar.gz" +script: | + set -e -o pipefail + + GO_SRC_RELEASE=golang-debian-1.12.8-1 + GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" + # Compile go and 
configure the environment + export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" + export BUILD_DIR=`pwd` + tar xf "${GO_SRC_TARBALL}" + rm -f "${GO_SRC_TARBALL}" + [ -d "${GO_SRC_RELEASE}/" ] + mv "${GO_SRC_RELEASE}/" go/ + pushd go/ + QUILT_PATCHES=debian/patches quilt push -a + fakeroot debian/rules build RUN_TESTS=false GOCACHE=/tmp/go-cache + popd + + export GOOS=darwin + export GOROOT=${BUILD_DIR}/go + export GOPATH=${BUILD_DIR}/gopath + mkdir -p ${GOPATH}/bin + + export PATH_orig=${PATH} + export PATH=$GOPATH/bin:$GOROOT/bin:$PATH + + export ARCHS='386 amd64' + export GO111MODULE=on + + # Make release tarball + pushd tendermint + VERSION=$(git describe --tags | sed 's/^v//') + COMMIT=$(git rev-parse --short=8 HEAD) + DISTNAME=tendermint-${VERSION} + git archive --format tar.gz --prefix ${DISTNAME}/ -o ${DISTNAME}.tar.gz HEAD + SOURCEDIST=`pwd`/`echo tendermint-*.tar.gz` + popd + + # Correct tar file order + mkdir -p temp + pushd temp + tar xf $SOURCEDIST + rm $SOURCEDIST + find tendermint-* | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > $SOURCEDIST + popd + + # Prepare GOPATH and install deps + distsrc=${GOPATH}/src/github.com/tendermint/tendermint + mkdir -p ${distsrc} + pushd ${distsrc} + tar --strip-components=1 -xf $SOURCEDIST + go mod download + popd + + # Configure LDFLAGS for reproducible builds + LDFLAGS="-extldflags=-static -buildid=${VERSION} -s -w \ + -X github.com/tendermint/tendermint/version.GitCommit=${COMMIT}" + + # Extract release tarball and build + for arch in ${ARCHS}; do + INSTALLPATH=`pwd`/installed/${DISTNAME}-${arch} + mkdir -p ${INSTALLPATH} + + # Build tendermint binary + pushd ${distsrc} + GOARCH=${arch} GOROOT_FINAL=${GOROOT} go build -a \ + -gcflags=all=-trimpath=${GOPATH} \ + -asmflags=all=-trimpath=${GOPATH} \ + -mod=readonly -tags "tendermint" \ + -ldflags="${LDFLAGS}" \ + -o ${INSTALLPATH}/tendermint ./cmd/tendermint/ + + popd # ${distsrc} + + pushd ${INSTALLPATH} + find -type f | sort | tar \ + --no-recursion --mode='u+rw,go+r-w,a+X' \ + --numeric-owner --sort=name \ + --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-darwin-${arch}.tar.gz + popd # installed + done + + rm -rf ${distsrc} + + mkdir -p $OUTDIR/src + mv $SOURCEDIST $OUTDIR/src diff --git a/scripts/gitian-descriptors/gitian-linux.yml b/scripts/gitian-descriptors/gitian-linux.yml new file mode 100644 index 000000000..6969d41d7 --- /dev/null +++ b/scripts/gitian-descriptors/gitian-linux.yml @@ -0,0 +1,110 @@ +--- +name: "tendermint-linux" +enable_cache: true +distro: "ubuntu" +suites: +- "bionic" +architectures: +- "amd64" +packages: +- "bsdmainutils" +- "build-essential" +- "ca-certificates" +- "curl" +- "debhelper" +- "dpkg-dev" +- "devscripts" +- "fakeroot" +- "git" +- "golang-any" +- "xxd" +- "quilt" +remotes: +- "url": "https://github.com/tendermint/tendermint.git" + "dir": "tendermint" +files: +- "golang-debian-1.12.8-1.tar.gz" +script: | + set -e -o pipefail + + GO_SRC_RELEASE=golang-debian-1.12.8-1 + GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" + # Compile go and configure the environment + export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" + export BUILD_DIR=`pwd` + tar xf "${GO_SRC_TARBALL}" + rm -f "${GO_SRC_TARBALL}" + [ -d "${GO_SRC_RELEASE}/" ] + mv "${GO_SRC_RELEASE}/" go/ + pushd go/ + QUILT_PATCHES=debian/patches quilt push -a + fakeroot debian/rules build RUN_TESTS=false GOCACHE=/tmp/go-cache + popd + + export GOROOT=${BUILD_DIR}/go + export GOPATH=${BUILD_DIR}/gopath + mkdir -p 
${GOPATH}/bin + + export PATH_orig=${PATH} + export PATH=$GOPATH/bin:$GOROOT/bin:$PATH + + export ARCHS='386 amd64 arm arm64' + export GO111MODULE=on + + # Make release tarball + pushd tendermint + VERSION=$(git describe --tags | sed 's/^v//') + COMMIT=$(git rev-parse --short=8 HEAD) + DISTNAME=tendermint-${VERSION} + git archive --format tar.gz --prefix ${DISTNAME}/ -o ${DISTNAME}.tar.gz HEAD + SOURCEDIST=`pwd`/`echo tendermint-*.tar.gz` + popd + + # Correct tar file order + mkdir -p temp + pushd temp + tar xf $SOURCEDIST + rm $SOURCEDIST + find tendermint-* | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > $SOURCEDIST + popd + + # Prepare GOPATH and install deps + distsrc=${GOPATH}/src/github.com/tendermint/tendermint + mkdir -p ${distsrc} + pushd ${distsrc} + tar --strip-components=1 -xf $SOURCEDIST + go mod download + popd + + # Configure LDFLAGS for reproducible builds + LDFLAGS="-extldflags=-static -buildid=${VERSION} -s -w \ + -X github.com/tendermint/tendermint/version.GitCommit=${COMMIT}" + + # Extract release tarball and build + for arch in ${ARCHS}; do + INSTALLPATH=`pwd`/installed/${DISTNAME}-${arch} + mkdir -p ${INSTALLPATH} + + # Build tendermint binary + pushd ${distsrc} + GOARCH=${arch} GOROOT_FINAL=${GOROOT} go build -a \ + -gcflags=all=-trimpath=${GOPATH} \ + -asmflags=all=-trimpath=${GOPATH} \ + -mod=readonly -tags "tendermint" \ + -ldflags="${LDFLAGS}" \ + -o ${INSTALLPATH}/tendermint ./cmd/tendermint/ + + popd # ${distsrc} + + pushd ${INSTALLPATH} + find -type f | sort | tar \ + --no-recursion --mode='u+rw,go+r-w,a+X' \ + --numeric-owner --sort=name \ + --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-linux-${arch}.tar.gz + popd # installed + done + + rm -rf ${distsrc} + + mkdir -p $OUTDIR/src + mv $SOURCEDIST $OUTDIR/src diff --git a/scripts/gitian-descriptors/gitian-windows.yml b/scripts/gitian-descriptors/gitian-windows.yml new file mode 100644 index 000000000..3215e7814 --- /dev/null +++ b/scripts/gitian-descriptors/gitian-windows.yml @@ -0,0 +1,111 @@ +--- +name: "tendermint-windows" +enable_cache: true +distro: "ubuntu" +suites: +- "bionic" +architectures: +- "amd64" +packages: +- "bsdmainutils" +- "build-essential" +- "ca-certificates" +- "curl" +- "debhelper" +- "dpkg-dev" +- "devscripts" +- "fakeroot" +- "git" +- "golang-any" +- "xxd" +- "quilt" +remotes: +- "url": "https://github.com/tendermint/tendermint.git" + "dir": "tendermint" +files: +- "golang-debian-1.12.8-1.tar.gz" +script: | + set -e -o pipefail + + GO_SRC_RELEASE=golang-debian-1.12.8-1 + GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" + # Compile go and configure the environment + export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" + export BUILD_DIR=`pwd` + tar xf "${GO_SRC_TARBALL}" + rm -f "${GO_SRC_TARBALL}" + [ -d "${GO_SRC_RELEASE}/" ] + mv "${GO_SRC_RELEASE}/" go/ + pushd go/ + QUILT_PATCHES=debian/patches quilt push -a + fakeroot debian/rules build RUN_TESTS=false GOCACHE=/tmp/go-cache + popd + + export GOOS=windows + export GOROOT=${BUILD_DIR}/go + export GOPATH=${BUILD_DIR}/gopath + mkdir -p ${GOPATH}/bin + + export PATH_orig=${PATH} + export PATH=$GOPATH/bin:$GOROOT/bin:$PATH + + export ARCHS='386 amd64' + export GO111MODULE=on + + # Make release tarball + pushd tendermint + VERSION=$(git describe --tags | sed 's/^v//') + COMMIT=$(git rev-parse --short=8 HEAD) + DISTNAME=tendermint-${VERSION} + git archive --format tar.gz --prefix ${DISTNAME}/ -o ${DISTNAME}.tar.gz HEAD + SOURCEDIST=`pwd`/`echo tendermint-*.tar.gz` + popd 
+
+  # Correct tar file order
+  mkdir -p temp
+  pushd temp
+  tar xf $SOURCEDIST
+  rm $SOURCEDIST
+  find tendermint-* | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > $SOURCEDIST
+  popd
+
+  # Prepare GOPATH and install deps
+  distsrc=${GOPATH}/src/github.com/tendermint/tendermint
+  mkdir -p ${distsrc}
+  pushd ${distsrc}
+  tar --strip-components=1 -xf $SOURCEDIST
+  go mod download
+  popd
+
+  # Configure LDFLAGS for reproducible builds
+  LDFLAGS="-extldflags=-static -buildid=${VERSION} -s -w \
+    -X github.com/tendermint/tendermint/version.GitCommit=${COMMIT}"
+
+  # Extract release tarball and build
+  for arch in ${ARCHS}; do
+    INSTALLPATH=`pwd`/installed/${DISTNAME}-${arch}
+    mkdir -p ${INSTALLPATH}
+
+    # Build tendermint binary
+    pushd ${distsrc}
+    GOARCH=${arch} GOROOT_FINAL=${GOROOT} go build -a \
+      -gcflags=all=-trimpath=${GOPATH} \
+      -asmflags=all=-trimpath=${GOPATH} \
+      -mod=readonly -tags "tendermint" \
+      -ldflags="${LDFLAGS}" \
+      -o ${INSTALLPATH}/tendermint.exe ./cmd/tendermint/
+
+    popd # ${distsrc}
+
+    pushd ${INSTALLPATH}
+    find -type f | sort | tar \
+      --no-recursion --mode='u+rw,go+r-w,a+X' \
+      --numeric-owner --sort=name \
+      --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-windows-${arch}.tar.gz
+    popd # installed
+  done
+
+  rm -rf ${distsrc}
+
+  mkdir -p $OUTDIR/src
+  mv $SOURCEDIST $OUTDIR/src
diff --git a/scripts/gitian-keys/README.md b/scripts/gitian-keys/README.md
new file mode 100644
index 000000000..f4ad711a9
--- /dev/null
+++ b/scripts/gitian-keys/README.md
@@ -0,0 +1,29 @@
+## PGP keys of Gitian builders and Tendermint Developers
+
+The file `keys.txt` contains fingerprints of the public keys of Gitian builders
+and active developers.
+
+The associated keys are mainly used to sign git commits or the build results
+of Gitian builds.
+
+The most recent version of each PGP key can be found on most PGP key servers.
+
+Fetch the latest version from the key server to see if any key was revoked in
+the meantime.
+To fetch the latest version of all PGP keys in your gpg homedir, run:
+
+```sh
+gpg --refresh-keys
+```
+
+To fetch keys of Gitian builders and active core developers, feed the list of
+fingerprints of the primary keys into gpg:
+
+```sh
+while read fingerprint keyholder_name; \
+do gpg --keyserver hkp://subset.pool.sks-keyservers.net \
+--recv-keys ${fingerprint}; done < ./keys.txt
+```
+
+Add your key to the list if you are a Tendermint core developer or have
+provided Gitian signatures for two major or minor releases of Tendermint.
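+
+For example, to double-check that an imported key matches an entry in the
+list, print its fingerprint and compare it with the corresponding line of
+`keys.txt` (shown here with the sample entry from the list):
+
+```sh
+gpg --fingerprint 04160004A8276E40BB9890FBE8A48AE5311D765A
+```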
diff --git a/scripts/gitian-keys/keys.txt b/scripts/gitian-keys/keys.txt
new file mode 100644
index 000000000..91330ae0b
--- /dev/null
+++ b/scripts/gitian-keys/keys.txt
@@ -0,0 +1 @@
+04160004A8276E40BB9890FBE8A48AE5311D765A Alessio Treglia
diff --git a/scripts/install/install_tendermint_arm.sh b/scripts/install/install_tendermint_arm.sh
index b260d8d07..085ba82f4 100644
--- a/scripts/install/install_tendermint_arm.sh
+++ b/scripts/install/install_tendermint_arm.sh
@@ -32,7 +32,6 @@ git checkout $BRANCH
 # XXX: uncomment if branch isn't master
 # git fetch origin $BRANCH
 make get_tools
-make get_vendor_deps
 make install
 
 # the binary is located in $GOPATH/bin
diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh
index b76b94855..294155d0e 100644
--- a/scripts/install/install_tendermint_bsd.sh
+++ b/scripts/install/install_tendermint_bsd.sh
@@ -47,7 +47,6 @@ cd "$GOPATH/src/$REPO"
 # build & install master
 git checkout $BRANCH
 gmake get_tools
-gmake get_vendor_deps
 gmake install
 
 # the binary is located in $GOPATH/bin
diff --git a/scripts/install/install_tendermint_osx.sh b/scripts/install/install_tendermint_osx.sh
index b4107ab01..ee799f66a 100644
--- a/scripts/install/install_tendermint_osx.sh
+++ b/scripts/install/install_tendermint_osx.sh
@@ -37,5 +37,4 @@ git checkout $BRANCH
 # XXX: uncomment if branch isn't master
 # git fetch origin $BRANCH
 make get_tools
-make get_vendor_deps
 make install
diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh
index 3fe6ea8ed..2e5558ff6 100644
--- a/scripts/install/install_tendermint_ubuntu.sh
+++ b/scripts/install/install_tendermint_ubuntu.sh
@@ -41,7 +41,6 @@ git checkout $BRANCH
 # XXX: uncomment if branch isn't master
 # git fetch origin $BRANCH
 make get_tools
-make get_vendor_deps
 make install
 
 # the binary is located in $GOPATH/bin
diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py
index b13a10342..ce56d8d7c 100755
--- a/scripts/release_management/bump-semver.py
+++ b/scripts/release_management/bump-semver.py
@@ -8,6 +8,7 @@
 
 import re
 import argparse
+import sys
 
 
 def semver(ver):
@@ -17,6 +18,18 @@ def semver(ver):
     return ver
 
 
+def get_tendermint_version():
+    """Extracts the current Tendermint version from version/version.go"""
+    pattern = re.compile(r"TMCoreSemVer = \"(?P<version>([0-9.]+)+)\"")
+    with open("version/version.go", "rt") as version_file:
+        for line in version_file:
+            m = pattern.search(line)
+            if m:
+                return m.group('version')
+
+    return None
+
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver)
@@ -34,4 +47,16 @@ def semver(ver):
     else:
         patch = int(patch) + 1
 
-    print("{0}.{1}".format(majorminorprefix, patch))
+    expected_version = "{0}.{1}".format(majorminorprefix, patch)
+    # if we're doing a release
+    if expected_version != "v0.0.0":
+        cur_version = get_tendermint_version()
+        if not cur_version:
+            print("Failed to obtain Tendermint version from version/version.go")
+            sys.exit(1)
+        expected_version_noprefix = expected_version.lstrip("v")
+        if expected_version_noprefix != "0.0.0" and expected_version_noprefix != cur_version:
+            print("Expected version/version.go#TMCoreSemVer to be {0}, but was {1}".format(expected_version_noprefix, cur_version))
+            sys.exit(1)
+
+    print(expected_version)
diff --git a/snapshot/manager.go b/snapshot/manager.go
index 19ef1f735..72beccf32 100644
---
a/snapshot/manager.go +++ b/snapshot/manager.go @@ -3,12 +3,12 @@ package snapshot import ( "crypto/sha256" "fmt" + "github.com/tendermint/tendermint/store" "sync/atomic" "github.com/golang/snappy" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/blockchain" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" sm "github.com/tendermint/tendermint/state" @@ -20,7 +20,7 @@ var manager atomic.Value // the global manager maintain tracking current snapsho func InitSnapshotManager( stateDB dbm.DB, txIndexDB dbm.DB, - blockStore *blockchain.BlockStore, + blockStore *store.BlockStore, dbDir string, logger log.Logger) { @@ -72,7 +72,7 @@ func ManagerAt(height int64) *SnapshotManager { type SnapshotManager struct { stateDB dbm.DB txIndexDB dbm.DB - blockStore *blockchain.BlockStore + blockStore *store.BlockStore logger log.Logger height int64 @@ -147,7 +147,7 @@ func (mgr *SnapshotManager) GetStateDB() dbm.DB { return mgr.stateDB } -func (mgr *SnapshotManager) GetBlockStore() *blockchain.BlockStore { +func (mgr *SnapshotManager) GetBlockStore() *store.BlockStore { return mgr.blockStore } diff --git a/snapshot/pool.go b/snapshot/pool.go index c35455b84..2c97739ca 100644 --- a/snapshot/pool.go +++ b/snapshot/pool.go @@ -44,12 +44,12 @@ type StatePool struct { mtx sync.Mutex manifest *abci.Manifest manifestHash abci.SHA256Sum - dummyHash abci.SHA256Sum + dummyHash abci.SHA256Sum state *sm.State // tendermint state - block *types.Block - seenCommit *types.Commit + block *types.Block + seenCommit *types.Commit stateDB dbm.DB - app proxy.AppConnState + app proxy.AppConnState requesters sync.Map // map[abci.SHA256Sum]*spRequester @@ -59,7 +59,7 @@ type StatePool struct { // peers respondedPeers map[abci.SHA256Sum]map[p2p.ID]struct{} // peers that response manifest to us, used to determine whom we will trust and request snapshot chunk from - trustedPeers map[p2p.ID]*spPeer // peers we trusted and want sync from + trustedPeers map[p2p.ID]*spPeer // peers we trusted and want sync from // number of pending request snapshots after pool init numPending int @@ -70,7 +70,7 @@ type StatePool struct { func NewStatePool(app proxy.AppConnState, requestsCh chan<- SnapshotRequest, errorsCh chan<- peerError, stateDB dbm.DB) *StatePool { sp := &StatePool{ - app: app, + app: app, trustedPeers: make(map[p2p.ID]*spPeer), receivedManifests: make(map[abci.SHA256Sum]*abci.Manifest), pendingScheduledHashes: make(chan abci.SHA256Sum, 100), @@ -259,7 +259,7 @@ func (pool *StatePool) initGuarded(hash abci.SHA256Sum, manifest *abci.Manifest, return err } - for peer, _ := range peers { + for peer := range peers { pool.addPeerGuarded(peer, hash) } @@ -293,7 +293,7 @@ func (pool *StatePool) initGuarded(hash abci.SHA256Sum, manifest *abci.Manifest, func (pool *StatePool) loadFromDiskOrSchedule(hash abci.SHA256Sum, existChunks map[abci.SHA256Sum]abci.SnapshotChunk) { if compressed, err := Manager().Reader.LoadFromRestoration(hash); err == nil { chunkHash := sha256.Sum256(compressed) - if chunkHash != hash { // the chunk we downloaded last time is not complete, re-download it again + if chunkHash != hash { // the chunk we downloaded last time is not complete, re-download it again pool.pendingScheduledHashes <- hash } if decompressed, err := snappy.Decode(nil, compressed); err == nil { @@ -399,7 +399,7 @@ func (pool *StatePool) processChunk(peerID p2p.ID, msg *bcChunkResponseMessage, } // TODO: this function may block receive goroutine which makes peer timeout 
-func (pool *StatePool) processChunkImpl(hash abci.SHA256Sum, snapshotChunk abci.SnapshotChunk) (err error){
+func (pool *StatePool) processChunkImpl(hash abci.SHA256Sum, snapshotChunk abci.SnapshotChunk) (err error) {
 	numPending := pool.numPending
 
 	switch chunk := snapshotChunk.(type) {
@@ -637,6 +637,6 @@ type SnapshotRequest struct {
 	PeerID p2p.ID
 }
 
-func(req SnapshotRequest) String() string {
+func (req SnapshotRequest) String() string {
 	return fmt.Sprintf("%d, %x", req.Height, req.Hash)
 }
diff --git a/snapshot/reactor.go b/snapshot/reactor.go
index b5f683dd0..27dd4e765 100644
--- a/snapshot/reactor.go
+++ b/snapshot/reactor.go
@@ -12,12 +12,12 @@ import (
 	"github.com/tendermint/go-amino"
 	abci "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/proxy"
+	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )
@@ -48,6 +48,12 @@ const (
 	stateSyncLockFileName = "STATESYNC.LOCK"
 )
 
+type fastSyncReactor interface {
+	// for when we switch from the state sync reactor to
+	// the blockchain reactor and fast sync
+	SwitchToBlockchain(*sm.State)
+}
+
 type peerError struct {
 	err    error
 	peerID p2p.ID
@@ -222,7 +228,7 @@ func (bcSR *StateReactor) startFastSync() {
 		bcSR.Logger.Error("failed to stat state sync lock file", "err", err)
 	}
 
-	bcR := bcSR.Switch.Reactor("BLOCKCHAIN").(*blockchain.BlockchainReactor)
+	bcR := bcSR.Switch.Reactor("BLOCKCHAIN").(fastSyncReactor)
 	bcR.SwitchToBlockchain(bcSR.pool.state)
 	bcSR.pool.Stop()
 }
diff --git a/state/blockindex/kv/kv.go b/state/blockindex/kv/kv.go
index 45b4921de..1c24cc7bf 100644
--- a/state/blockindex/kv/kv.go
+++ b/state/blockindex/kv/kv.go
@@ -54,6 +54,6 @@ func (bki *BlockIndex) Index(header *types.Header) error {
 	if err != nil {
 		return err
 	}
-	bki.store.Set(hash, rawBytes)
+	bki.store.SetSync(hash, rawBytes)
 	return nil
 }
diff --git a/state/wire.go b/state/codec.go
similarity index 100%
rename from state/wire.go
rename to state/codec.go
diff --git a/state/execution.go b/state/execution.go
index 07e76c9ad..159b3a99b 100644
--- a/state/execution.go
+++ b/state/execution.go
@@ -8,6 +8,7 @@ import (
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/fail"
 	"github.com/tendermint/tendermint/libs/log"
+	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
 )
@@ -30,7 +31,7 @@ type BlockExecutor struct {
 
 	// manage the mempool lock during commit
 	// and update both with block results after commit.
-	mempool Mempool
+	mempool mempl.Mempool
 	evpool  EvidencePool
 
 	logger log.Logger
@@ -51,8 +52,7 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption {
 
 // NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
 // Call SetEventBus to provide one.
-func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool Mempool, evpool EvidencePool, withAppState bool, options ...BlockExecutorOption) *BlockExecutor { +func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool mempl.Mempool, evpool EvidencePool, withAppState bool, options ...BlockExecutorOption) *BlockExecutor { res := &BlockExecutor{ db: db, proxyApp: proxyApp, @@ -71,6 +71,10 @@ func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsen return res } +func (blockExec *BlockExecutor) DB() dbm.DB { + return blockExec.db +} + // SetEventBus - sets the event bus for publishing block related events. // If not called, it defaults to types.NopEventBus. func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { @@ -121,7 +125,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b } startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db) + abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, &state, blockExec.db) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { @@ -131,7 +135,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX // Save the results before we commit. - saveABCIResponses(blockExec.db, block.Height, abciResponses) + SaveABCIResponses(blockExec.db, block.Height, abciResponses) fail.Fail() // XXX @@ -156,7 +160,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b } // Lock mempool, commit app state, update mempoool. - appHash, err := blockExec.Commit(state, block) + appHash, err := blockExec.Commit(state, block, abciResponses.DeliverTx) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } @@ -188,6 +192,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b func (blockExec *BlockExecutor) Commit( state State, block *types.Block, + deliverTxResponses []*abci.ResponseDeliverTx, ) ([]byte, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -222,6 +227,7 @@ func (blockExec *BlockExecutor) Commit( err = blockExec.mempool.Update( block.Height, block.Txs, + deliverTxResponses, TxPreCheck(state), TxPostCheck(state), ) @@ -238,7 +244,7 @@ func execBlockOnProxyApp( logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block, - lastValSet *types.ValidatorSet, + state *State, stateDB dbm.DB, ) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 @@ -248,8 +254,7 @@ func execBlockOnProxyApp( // Execute transactions and get hash. proxyCb := func(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_DeliverTx: + if r, ok := res.Value.(*abci.Response_DeliverTx); ok { // TODO: make use of res.Log // TODO: make use of this info // Blocks may include invalid txs. @@ -266,7 +271,7 @@ func execBlockOnProxyApp( } proxyAppConn.SetResponseCallback(proxyCb) - commitInfo, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) + commitInfo, byzVals := getBeginBlockValidatorInfo(block, state, stateDB) // Begin block var err error @@ -283,7 +288,7 @@ func execBlockOnProxyApp( // Run txs of block. 
 	for _, tx := range block.Txs {
-		proxyAppConn.DeliverTxAsync(tx)
+		proxyAppConn.DeliverTxAsync(abci.RequestDeliverTx{Tx: tx})
 		if err := proxyAppConn.Error(); err != nil {
 			return nil, err
 		}
@@ -301,22 +306,36 @@ func execBlockOnProxyApp(
 	return abciResponses, nil
 }
 
-func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) {
-
-	// Sanity check that commit length matches validator set size -
-	// only applies after first block
+func getBeginBlockValidatorInfo(block *types.Block, state *State, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) {
+	voteInfos := make([]abci.VoteInfo, block.LastCommit.Size())
+	byzVals := make([]abci.Evidence, len(block.Evidence.Evidence))
+	var lastValSet *types.ValidatorSet
+	var err error
 	if block.Height > 1 {
-		precommitLen := len(block.LastCommit.Precommits)
+		// for state sync, the validator set can't be loaded from db and it should be equal to the validator set in state
+		if block.Height == state.LastBlockHeight+1 {
+			lastValSet = state.Validators
+		} else {
+			lastValSet, err = LoadValidators(stateDB, block.Height-1)
+			if err != nil {
+				panic(err) // shouldn't happen
+			}
+		}
+
+		// Sanity check that commit length matches validator set size -
+		// only applies after first block
+
+		precommitLen := block.LastCommit.Size()
 		valSetLen := len(lastValSet.Validators)
 		if precommitLen != valSetLen {
 			// sanity check
 			panic(fmt.Sprintf("precommit length (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
 				precommitLen, valSetLen, block.Height, block.LastCommit.Precommits, lastValSet.Validators))
 		}
+	} else {
+		lastValSet = types.NewValidatorSet(nil)
 	}
 
-	// Collect the vote info (list of validators and whether or not they signed).
-	voteInfos := make([]abci.VoteInfo, len(lastValSet.Validators))
 	for i, val := range lastValSet.Validators {
 		var vote *types.CommitSig
 		if i < len(block.LastCommit.Precommits) {
@@ -329,12 +348,6 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS
 		voteInfos[i] = voteInfo
 	}
 
-	commitInfo := abci.LastCommitInfo{
-		Round: int32(block.LastCommit.Round()),
-		Votes: voteInfos,
-	}
-
-	byzVals := make([]abci.Evidence, len(block.Evidence.Evidence))
 	for i, ev := range block.Evidence.Evidence {
 		// We need the validator set. We already did this in validateBlock.
// TODO: Should we instead cache the valset in the evidence itself and add @@ -346,6 +359,10 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) } + commitInfo := abci.LastCommitInfo{ + Round: int32(block.LastCommit.Round()), + Votes: voteInfos, + } return commitInfo, byzVals } @@ -474,10 +491,10 @@ func ExecCommitBlock( appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger, - lastValSet *types.ValidatorSet, stateDB dbm.DB, ) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB) + state := LoadState(stateDB) + _, err := execBlockOnProxyApp(logger, appConnConsensus, block, &state, stateDB) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err diff --git a/state/execution_test.go b/state/execution_test.go index 5f83b783f..9e0079388 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1,25 +1,22 @@ -package state +package state_test import ( "context" - "fmt" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - cmn "github.com/tendermint/tendermint/libs/common" - dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" - tmtime "github.com/tendermint/tendermint/types/time" - + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) var ( @@ -35,13 +32,13 @@ func TestApplyBlock(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() - state, stateDB := state(1, 1) + state, stateDB, _ := makeState(1, 1) - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), - MockMempool{}, MockEvidencePool{}, true) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.MockEvidencePool{}, true) block := makeBlock(state, 1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} //nolint:ineffassign state, err = blockExec.ApplyBlock(state, blockID, block) @@ -59,11 +56,11 @@ func TestBeginBlockValidators(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() - state, stateDB := state(2, 2) + state, stateDB, _ := makeState(2, 2) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{prevHash, prevParts} + prevBlockID := types.BlockID{Hash: prevHash, PartsHeader: prevParts} now := tmtime.Now() commitSig0 := (&types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType}).CommitSig() @@ -85,7 +82,7 @@ func TestBeginBlockValidators(t *testing.T) { // block for height 2 block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) require.Nil(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they 
signed @@ -112,11 +109,11 @@ func TestBeginBlockByzantineValidators(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() - state, stateDB := state(2, 12) + state, stateDB, _ := makeState(2, 12) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{prevHash, prevParts} + prevBlockID := types.BlockID{Hash: prevHash, PartsHeader: prevParts} height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address @@ -146,7 +143,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) block.Time = now block.Evidence.Evidence = tc.evidence - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) require.Nil(t, err, tc.desc) // -> app must receive an index of the byzantine validator @@ -160,7 +157,7 @@ func TestValidateValidatorUpdates(t *testing.T) { secpKey := secp256k1.GenPrivKey().PubKey() - defaultValidatorParams := types.ValidatorParams{[]string{types.ABCIPubKeyTypeEd25519}} + defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} testCases := []struct { name string @@ -214,7 +211,7 @@ func TestValidateValidatorUpdates(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - err := validateValidatorUpdates(tc.abciUpdates, tc.validatorParams) + err := sm.ValidateValidatorUpdates(tc.abciUpdates, tc.validatorParams) if tc.shouldErr { assert.Error(t, err) } else { @@ -308,10 +305,10 @@ func TestEndBlockValidatorUpdates(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() - state, stateDB := state(1, 1) + state, stateDB, _ := makeState(1, 1) + + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}, true) - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), - MockMempool{}, MockEvidencePool{}, true) eventBus := types.NewEventBus() err = eventBus.Start() require.NoError(t, err) @@ -322,7 +319,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { require.NoError(t, err) block := makeBlock(state, 1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} pubkey := ed25519.GenPrivKey().PubKey() app.ValidatorUpdates = []abci.ValidatorUpdate{ @@ -366,11 +363,11 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() - state, stateDB := state(1, 1) - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), MockMempool{}, MockEvidencePool{}, true) + state, stateDB, _ := makeState(1, 1) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}, true) block := makeBlock(state, 1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} // Remove the only validator app.ValidatorUpdates = []abci.ValidatorUpdate{ @@ -382,90 +379,3 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { assert.NotEmpty(t, state.NextValidators.Validators) } - 
-//---------------------------------------------------------------------------- - -// make some bogus txs -func makeTxs(height int64) (txs []types.Tx) { - for i := 0; i < nTxsPerBlock; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) - } - return txs -} - -func state(nVals, height int) (State, dbm.DB) { - vals := make([]types.GenesisValidator, nVals) - for i := 0; i < nVals; i++ { - secret := []byte(fmt.Sprintf("test%d", i)) - pk := ed25519.GenPrivKeyFromSecret(secret) - vals[i] = types.GenesisValidator{ - pk.PubKey().Address(), - pk.PubKey(), - 1000, - fmt.Sprintf("test%d", i), - } - } - s, _ := MakeGenesisState(&types.GenesisDoc{ - ChainID: chainID, - Validators: vals, - AppHash: nil, - }) - - // save validators to db for 2 heights - stateDB := dbm.NewMemDB() - SaveState(stateDB, s) - - for i := 1; i < height; i++ { - s.LastBlockHeight++ - s.LastValidators = s.Validators.Copy() - SaveState(stateDB, s) - } - return s, stateDB -} - -func makeBlock(state State, height int64) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, state.Validators.GetProposer().Address) - return block -} - -//---------------------------------------------------------------------------- - -type testApp struct { - abci.BaseApplication - - CommitVotes []abci.VoteInfo - ByzantineValidators []abci.Evidence - ValidatorUpdates []abci.ValidatorUpdate -} - -var _ abci.Application = (*testApp)(nil) - -func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { - return abci.ResponseInfo{} -} - -func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { - app.CommitVotes = req.LastCommitInfo.Votes - app.ByzantineValidators = req.ByzantineValidators - return abci.ResponseBeginBlock{} -} - -func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates} -} - -func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}} -} - -func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx { - return abci.ResponseCheckTx{} -} - -func (app *testApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{} -} - -func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { - return -} diff --git a/state/export_test.go b/state/export_test.go new file mode 100644 index 000000000..0b7da945f --- /dev/null +++ b/state/export_test.go @@ -0,0 +1,62 @@ +package state + +import ( + abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" +) + +// +// TODO: Remove dependence on all entities exported from this file. +// +// Every entity exported here is dependent on a private entity from the `state` +// package. Currently, these functions are only made available to tests in the +// `state_test` package, but we should not be relying on them for our testing. +// Instead, we should be exclusively relying on exported entities for our +// testing, and should be refactoring exported entities to make them more +// easily testable from outside of the package. +// + +const ValSetCheckpointInterval = valSetCheckpointInterval + +// UpdateState is an alias for updateState exported from execution.go, +// exclusively and explicitly for testing. 
+func UpdateState(
+	state State,
+	blockID types.BlockID,
+	header *types.Header,
+	abciResponses *ABCIResponses,
+	validatorUpdates []*types.Validator,
+) (State, error) {
+	return updateState(state, blockID, header, abciResponses, validatorUpdates)
+}
+
+// ValidateValidatorUpdates is an alias for validateValidatorUpdates exported
+// from execution.go, exclusively and explicitly for testing.
+func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error {
+	return validateValidatorUpdates(abciUpdates, params)
+}
+
+// CalcValidatorsKey is an alias for the private calcValidatorsKey method in
+// store.go, exported exclusively and explicitly for testing.
+func CalcValidatorsKey(height int64) []byte {
+	return calcValidatorsKey(height)
+}
+
+// SaveABCIResponses is an alias for the private saveABCIResponses method in
+// store.go, exported exclusively and explicitly for testing.
+func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) {
+	saveABCIResponses(db, height, abciResponses)
+}
+
+// SaveConsensusParamsInfo is an alias for the private saveConsensusParamsInfo
+// method in store.go, exported exclusively and explicitly for testing.
+func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) {
+	saveConsensusParamsInfo(db, nextHeight, changeHeight, params)
+}
+
+// SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in
+// store.go, exported exclusively and explicitly for testing.
+func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) {
+	saveValidatorsInfo(db, height, lastHeightChanged, valSet)
+}
diff --git a/state/helpers_test.go b/state/helpers_test.go
new file mode 100644
index 000000000..c3e296239
--- /dev/null
+++ b/state/helpers_test.go
@@ -0,0 +1,260 @@
+package state_test
+
+import (
+	"bytes"
+	"fmt"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/proxy"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
+	tmtime "github.com/tendermint/tendermint/types/time"
+)
+
+type paramsChangeTestCase struct {
+	height int64
+	params types.ConsensusParams
+}
+
+// always returns true if asked if any evidence was already committed.
+type mockEvPoolAlwaysCommitted struct{} + +func (m mockEvPoolAlwaysCommitted) PendingEvidence(int64) []types.Evidence { return nil } +func (m mockEvPoolAlwaysCommitted) AddEvidence(types.Evidence) error { return nil } +func (m mockEvPoolAlwaysCommitted) Update(*types.Block, sm.State) {} +func (m mockEvPoolAlwaysCommitted) IsCommitted(types.Evidence) bool { return true } + +func newTestApp() proxy.AppConns { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + return proxy.NewAppConns(cc) +} + +func makeAndCommitGoodBlock( + state sm.State, + height int64, + lastCommit *types.Commit, + proposerAddr []byte, + blockExec *sm.BlockExecutor, + privVals map[string]types.PrivValidator, + evidence []types.Evidence) (sm.State, types.BlockID, *types.Commit, error) { + // A good block passes + state, blockID, err := makeAndApplyGoodBlock(state, height, lastCommit, proposerAddr, blockExec, evidence) + if err != nil { + return state, types.BlockID{}, nil, err + } + + // Simulate a lastCommit for this block from all validators for the next height + commit, err := makeValidCommit(height, blockID, state.Validators, privVals) + if err != nil { + return state, types.BlockID{}, nil, err + } + return state, blockID, commit, nil +} + +func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, + blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) { + block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr) + if err := blockExec.ValidateBlock(state, block); err != nil { + return state, types.BlockID{}, err + } + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}} + state, err := blockExec.ApplyBlock(state, blockID, block) + if err != nil { + return state, types.BlockID{}, err + } + return state, blockID, nil +} + +func makeValidCommit(height int64, blockID types.BlockID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator) (*types.Commit, error) { + sigs := make([]*types.CommitSig, 0) + for i := 0; i < vals.Size(); i++ { + _, val := vals.GetByIndex(i) + vote, err := types.MakeVote(height, blockID, vals, privVals[val.Address.String()], chainID) + if err != nil { + return nil, err + } + sigs = append(sigs, vote.CommitSig()) + } + return types.NewCommit(blockID, sigs), nil +} + +// make some bogus txs +func makeTxs(height int64) (txs []types.Tx) { + for i := 0; i < nTxsPerBlock; i++ { + txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) + } + return txs +} + +func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { + vals := make([]types.GenesisValidator, nVals) + privVals := make(map[string]types.PrivValidator, nVals) + for i := 0; i < nVals; i++ { + secret := []byte(fmt.Sprintf("test%d", i)) + pk := ed25519.GenPrivKeyFromSecret(secret) + valAddr := pk.PubKey().Address() + vals[i] = types.GenesisValidator{ + Address: valAddr, + PubKey: pk.PubKey(), + Power: 1000, + Name: fmt.Sprintf("test%d", i), + } + privVals[valAddr.String()] = types.NewMockPVWithParams(pk, false, false) + } + s, _ := sm.MakeGenesisState(&types.GenesisDoc{ + ChainID: chainID, + Validators: vals, + AppHash: nil, + }) + + stateDB := dbm.NewMemDB() + sm.SaveState(stateDB, s) + + for i := 1; i < height; i++ { + s.LastBlockHeight++ + s.LastValidators = s.Validators.Copy() + sm.SaveState(stateDB, s) + } + return s, stateDB, privVals +} + +func makeBlock(state sm.State, height int64) *types.Block { + block, _ := state.MakeBlock(height, 
makeTxs(state.LastBlockHeight), new(types.Commit), nil, state.Validators.GetProposer().Address) + return block +} + +func genValSet(size int) *types.ValidatorSet { + vals := make([]*types.Validator, size) + for i := 0; i < size; i++ { + vals[i] = types.NewValidator(ed25519.GenPrivKey().PubKey(), 10) + } + return types.NewValidatorSet(vals) +} + +func makeConsensusParams( + blockBytes, blockGas int64, + evidenceAge int64, +) types.ConsensusParams { + return types.ConsensusParams{ + BlockSize: types.BlockSizeParams{ + MaxBytes: blockBytes, + MaxGas: blockGas, + }, + Evidence: types.EvidenceParams{ + MaxAge: evidenceAge, + }, + } +} + +func makeHeaderPartsResponsesValPubKeyChange(state sm.State, pubkey crypto.PubKey) (types.Header, types.BlockID, *sm.ABCIResponses) { + + block := makeBlock(state, state.LastBlockHeight+1) + abciResponses := &sm.ABCIResponses{ + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + } + + // If the pubkey is new, remove the old and add the new. + _, val := state.NextValidators.GetByIndex(0) + if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { + abciResponses.EndBlock = &abci.ResponseEndBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(val.PubKey, 0), + types.TM2PB.NewValidatorUpdate(pubkey, 10), + }, + } + } + + return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses +} + +func makeHeaderPartsResponsesValPowerChange(state sm.State, power int64) (types.Header, types.BlockID, *sm.ABCIResponses) { + + block := makeBlock(state, state.LastBlockHeight+1) + abciResponses := &sm.ABCIResponses{ + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + } + + // If the pubkey is new, remove the old and add the new. + _, val := state.NextValidators.GetByIndex(0) + if val.VotingPower != power { + abciResponses.EndBlock = &abci.ResponseEndBlock{ + ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(val.PubKey, power), + }, + } + } + + return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses +} + +func makeHeaderPartsResponsesParams(state sm.State, params types.ConsensusParams) (types.Header, types.BlockID, *sm.ABCIResponses) { + + block := makeBlock(state, state.LastBlockHeight+1) + abciResponses := &sm.ABCIResponses{ + EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, + } + return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses +} + +func randomGenesisDoc() *types.GenesisDoc { + pubkey := ed25519.GenPrivKey().PubKey() + return &types.GenesisDoc{ + GenesisTime: tmtime.Now(), + ChainID: "abc", + Validators: []types.GenesisValidator{ + { + Address: pubkey.Address(), + PubKey: pubkey, + Power: 10, + Name: "myval", + }, + }, + ConsensusParams: types.DefaultConsensusParams(), + } +} + +//---------------------------------------------------------------------------- + +type testApp struct { + abci.BaseApplication + + CommitVotes []abci.VoteInfo + ByzantineValidators []abci.Evidence + ValidatorUpdates []abci.ValidatorUpdate +} + +var _ abci.Application = (*testApp)(nil) + +func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { + return abci.ResponseInfo{} +} + +func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { + app.CommitVotes = req.LastCommitInfo.Votes + app.ByzantineValidators = req.ByzantineValidators + return abci.ResponseBeginBlock{} +} + +func (app *testApp) 
EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + return abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates} +} + +func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + return abci.ResponseDeliverTx{Events: []abci.Event{}} +} + +func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { + return abci.ResponseCheckTx{} +} + +func (app *testApp) Commit() abci.ResponseCommit { + return abci.ResponseCommit{} +} + +func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { + return +} diff --git a/state/index.go b/state/index.go index d726db529..ad00fa584 100644 --- a/state/index.go +++ b/state/index.go @@ -152,7 +152,7 @@ func (ih *IndexHub) SetIndexedHeight(h int64) { if err != nil { ih.Logger.Error("failed to MarshalBinaryBare for indexed height", "error", err, "height", h) } else { - ih.stateDB.Set(IndexHeightKey, rawHeight) + ih.stateDB.SetSync(IndexHeightKey, rawHeight) } } diff --git a/state/main_test.go b/state/main_test.go new file mode 100644 index 000000000..00ecf2686 --- /dev/null +++ b/state/main_test.go @@ -0,0 +1,13 @@ +package state_test + +import ( + "os" + "testing" + + "github.com/tendermint/tendermint/types" +) + +func TestMain(m *testing.M) { + types.RegisterMockEvidencesGlobal() + os.Exit(m.Run()) +} diff --git a/state/services.go b/state/services.go index 07d12c5a1..10b389ee7 100644 --- a/state/services.go +++ b/state/services.go @@ -1,8 +1,6 @@ package state import ( - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -11,57 +9,6 @@ import ( // NOTE: Interfaces used by RPC must be thread safe! //------------------------------------------------------ -//------------------------------------------------------ -// mempool - -// Mempool defines the mempool interface as used by the ConsensusState. -// Updates to the mempool need to be synchronized with committing a block -// so apps can reset their transient state on Commit -type Mempool interface { - Lock() - Unlock() - - Size() int - CheckTx(types.Tx, func(*abci.Response)) error - CheckTxWithInfo(types.Tx, func(*abci.Response), mempool.TxInfo) error - ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - Update(int64, types.Txs, mempool.PreCheckFunc, mempool.PostCheckFunc) error - Flush() - FlushAppConn() error - - TxsAvailable() <-chan struct{} - EnableTxsAvailable() -} - -// MockMempool is an empty implementation of a Mempool, useful for testing. 
-type MockMempool struct{}
-
-var _ Mempool = MockMempool{}
-
-func (MockMempool) Lock()     {}
-func (MockMempool) Unlock()   {}
-func (MockMempool) Size() int { return 0 }
-func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error {
-	return nil
-}
-func (MockMempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response),
-	_ mempool.TxInfo) error {
-	return nil
-}
-func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
-func (MockMempool) Update(
-	_ int64,
-	_ types.Txs,
-	_ mempool.PreCheckFunc,
-	_ mempool.PostCheckFunc,
-) error {
-	return nil
-}
-func (MockMempool) Flush()                        {}
-func (MockMempool) FlushAppConn() error           { return nil }
-func (MockMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) }
-func (MockMempool) EnableTxsAvailable()           {}
-
 //------------------------------------------------------
 // blockstore
@@ -96,7 +43,7 @@ type EvidencePool interface {
 	IsCommitted(types.Evidence) bool
 }
 
-// MockMempool is an empty implementation of a Mempool, useful for testing.
+// MockEvidencePool is an empty implementation of EvidencePool, useful for testing.
 type MockEvidencePool struct{}
 
 func (m MockEvidencePool) PendingEvidence(int64) []types.Evidence { return nil }
diff --git a/state/state.go b/state/state.go
index b6253b645..3e88fd11a 100644
--- a/state/state.go
+++ b/state/state.go
@@ -94,9 +94,9 @@ func (state State) Copy() State {
 		LastBlockID:   state.LastBlockID,
 		LastBlockTime: state.LastBlockTime,
 
-		NextValidators:              state.NextValidators.Copy(),
-		Validators:                  state.Validators.Copy(),
-		LastValidators:              state.LastValidators.Copy(),
+		NextValidators: state.NextValidators.Copy(),
+		Validators:     state.Validators.Copy(),
+		/**/ LastValidators: state.LastValidators.Copy(),
 
 		LastHeightValidatorsChanged: state.LastHeightValidatorsChanged,
 
 		ConsensusParams: state.ConsensusParams,
diff --git a/state/state_test.go b/state/state_test.go
index bdec63a68..67886cb1e 100644
--- a/state/state_test.go
+++ b/state/state_test.go
@@ -1,4 +1,4 @@
-package state
+package state_test
 
 import (
 	"bytes"
@@ -10,23 +10,23 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	abci "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
+	sm "github.com/tendermint/tendermint/state"
 
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/types"
 )
 
 // setupTestCase does setup common to all test cases.
-func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { +func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { config := cfg.ResetTestRoot("state_") dbType := dbm.DBBackendType(config.DBBackend) stateDB := dbm.NewDB("state", dbType, config.DBDir()) - state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } @@ -59,7 +58,7 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { Validators: nil, } require.Nil(t, doc.ValidateAndComplete()) - state, err := MakeGenesisState(&doc) + state, err := sm.MakeGenesisState(&doc) require.Nil(t, err) require.Equal(t, 0, len(state.Validators.Validators)) require.Equal(t, 0, len(state.NextValidators.Validators)) @@ -73,9 +72,9 @@ func TestStateSaveLoad(t *testing.T) { assert := assert.New(t) state.LastBlockHeight++ - SaveState(stateDB, state) + sm.SaveState(stateDB, state) - loadedState := LoadState(stateDB) + loadedState := sm.LoadState(stateDB) assert.True(state.Equals(loadedState), fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", loadedState, state)) @@ -92,15 +91,16 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { // Build mock responses. block := makeBlock(state, 2) - abciResponses := NewABCIResponses(block) - abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} - abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil} + abciResponses := sm.NewABCIResponses(block) + abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} + abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} + abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{ types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), }} - saveABCIResponses(stateDB, block.Height, abciResponses) - loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height) + sm.SaveABCIResponses(stateDB, block.Height, abciResponses) + loadedABCIResponses, err := sm.LoadABCIResponses(stateDB, block.Height) assert.Nil(err) assert.Equal(abciResponses, loadedABCIResponses, fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", @@ -129,20 +129,22 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, types.ABCIResults{ - {32, []byte("Hello")}, + {Code: 32, Data: []byte("Hello")}, }}, 2: { []*abci.ResponseDeliverTx{ {Code: 383}, - {Data: []byte("Gotcha!"), - Tags: []cmn.KVPair{ - {Key: []byte("a"), Value: []byte("1")}, - {Key: []byte("build"), Value: []byte("stuff")}, - }}, + { + Data: []byte("Gotcha!"), + Events: []abci.Event{ + {Type: "type1", Attributes: []cmn.KVPair{{Key: []byte("a"), Value: []byte("1")}}}, + {Type: "type2", Attributes: []cmn.KVPair{{Key: []byte("build"), Value: []byte("stuff")}}}, + }, + }, }, types.ABCIResults{ - {383, nil}, - {0, []byte("Gotcha!")}, + {Code: 383, Data: nil}, + {Code: 0, Data: []byte("Gotcha!")}, }}, 3: { nil, @@ -153,24 +155,24 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { // Query all before, this should return error. for i := range cases { h := int64(i + 1) - res, err := LoadABCIResponses(stateDB, h) + res, err := sm.LoadABCIResponses(stateDB, h) assert.Error(err, "%d: %#v", i, res) } // Add all cases. 
for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save - responses := &ABCIResponses{ + responses := &sm.ABCIResponses{ DeliverTx: tc.added, EndBlock: &abci.ResponseEndBlock{}, } - saveABCIResponses(stateDB, h, responses) + sm.SaveABCIResponses(stateDB, h, responses) } // Query all before, should return expected value. for i, tc := range cases { h := int64(i + 1) - res, err := LoadABCIResponses(stateDB, h) + res, err := sm.LoadABCIResponses(stateDB, h) assert.NoError(err, "%d", i) assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i) } @@ -184,26 +186,26 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { assert := assert.New(t) // Can't load anything for height 0. - v, err := LoadValidators(stateDB, 0) - assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") + _, err := sm.LoadValidators(stateDB, 0) + assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. - v, err = LoadValidators(stateDB, 1) + v, err := sm.LoadValidators(stateDB, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. - v, err = LoadValidators(stateDB, 2) + v, err = sm.LoadValidators(stateDB, 2) assert.Nil(err, "expected no err at height 2") assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - vp0, err := LoadValidators(stateDB, nextHeight+0) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + vp0, err := sm.LoadValidators(stateDB, nextHeight+0) assert.Nil(err, "expected no err") - vp1, err := LoadValidators(stateDB, nextHeight+1) + vp1, err := sm.LoadValidators(stateDB, nextHeight+1) assert.Nil(err, "expected no err") assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") @@ -232,13 +234,13 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { changeIndex++ power++ } - header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) + header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) - state, err = updateState(state, blockID, &header, responses, validatorUpdates) + state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) } // On each height change, increment the power by one. @@ -256,7 +258,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. + v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. 
assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -402,13 +404,13 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { assert.EqualValues(t, 0, val1.ProposerPriority) block := makeBlock(state, state.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} - abciResponses := &ABCIResponses{ + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - updatedState, err := updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) curTotal := val1VotingPower // one increment step and one validator: 0 + power - total_power == 0 @@ -420,7 +422,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val2VotingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) - updatedState2, err := updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState2.NextValidators.Validators), 2) @@ -439,13 +441,13 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // 3. Center - with avg, resulting val2:-61, val1:62 avg := big.NewInt(0).Add(big.NewInt(wantVal1Prio), big.NewInt(wantVal2Prio)) avg.Div(avg, big.NewInt(2)) - wantVal2Prio = wantVal2Prio - avg.Int64() // -61 - wantVal1Prio = wantVal1Prio - avg.Int64() // 62 + wantVal2Prio -= avg.Int64() // -61 + wantVal1Prio -= avg.Int64() // 62 // 4. 
Steps from IncrementProposerPriority - wantVal1Prio = wantVal1Prio + val1VotingPower // 72 - wantVal2Prio = wantVal2Prio + val2VotingPower // 39 - wantVal1Prio = wantVal1Prio - totalPowerAfter // -38 as val1 is proposer + wantVal1Prio += val1VotingPower // 72 + wantVal2Prio += val2VotingPower // 39 + wantVal1Prio -= totalPowerAfter // -38 as val1 is proposer assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority) assert.Equal(t, wantVal2Prio, addedVal2.ProposerPriority) @@ -459,7 +461,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // this will cause the diff of priorities (77) // to be larger than threshold == 2*totalVotingPower (22): - updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState3.NextValidators.Validators), 2) @@ -512,15 +514,15 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) block := makeBlock(state, state.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} // no updates: - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - updatedState, err := updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) // 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10 @@ -535,7 +537,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) - updatedState2, err := updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState2.NextValidators.Validators), 2) @@ -562,9 +564,9 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64() // -11 expectedVal1Prio := oldVal1.ProposerPriority - avg.Int64() // 11 // 4. 
Increment - expectedVal2Prio = expectedVal2Prio + val2VotingPower // -11 + 10 = -1 - expectedVal1Prio = expectedVal1Prio + val1VotingPower // 11 + 10 == 21 - expectedVal1Prio = expectedVal1Prio - totalPower // 1, val1 proposer + expectedVal2Prio += val2VotingPower // -11 + 10 = -1 + expectedVal1Prio += val1VotingPower // 11 + 10 == 21 + expectedVal1Prio -= totalPower // 1, val1 proposer assert.EqualValues(t, expectedVal1Prio, updatedVal1.ProposerPriority) assert.EqualValues(t, expectedVal2Prio, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2) @@ -572,7 +574,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) assert.Equal(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address) @@ -588,7 +590,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // Increment expectedVal2Prio2 := expectedVal2Prio + val2VotingPower // -1 + 10 = 9 expectedVal1Prio2 := expectedVal1Prio + val1VotingPower // 1 + 10 == 11 - expectedVal1Prio2 = expectedVal1Prio2 - totalPower // -9, val1 proposer + expectedVal1Prio2 -= totalPower // -9, val1 proposer assert.EqualValues(t, expectedVal1Prio2, updatedVal1.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2) assert.EqualValues(t, expectedVal2Prio2, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2) @@ -596,13 +598,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // no changes in voting power and both validators have same voting power // -> proposers should alternate: oldState := updatedState3 - abciResponses = &ABCIResponses{ + abciResponses = &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - oldState, err = updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) expectedVal1Prio2 = 1 expectedVal2Prio2 = -1 @@ -611,13 +613,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { for i := 0; i < 1000; i++ { // no validator updates: - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) // alternate (and cyclic priorities): assert.NotEqual(t, updatedState.Validators.Proposer.Address, updatedState.NextValidators.Proposer.Address, "iter: %v", i) @@ -658,16 +660,17 @@ func TestLargeGenesisValidator(t *testing.T) { oldState := state for i := 0; i < 10; i++ { // no updates: - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, 
err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block := makeBlock(oldState, oldState.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} - updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) // no changes in voting power (ProposerPrio += VotingPower == Voting in 1st round; then shiftByAvg == 0, // then -Total == -Voting) // -> no change in ProposerPrio (stays zero): @@ -686,26 +689,28 @@ func TestLargeGenesisValidator(t *testing.T) { firstAddedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(firstAddedValPubKey), Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) assert.NoError(t, err) - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } block := makeBlock(oldState, oldState.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} - updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) lastState := updatedState for i := 0; i < 200; i++ { // no updates: - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block := makeBlock(lastState, lastState.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} - updatedStateInner, err := updateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) + updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) lastState = updatedStateInner } // set state to last state of above iteration @@ -729,25 +734,26 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) assert.NoError(t, err) - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } block := makeBlock(oldState, oldState.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} - state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) } require.Equal(t, 10+2, len(state.NextValidators.Validators)) // remove genesis validator: removeGenesisVal := abci.ValidatorUpdate{PubKey:
types.TM2PB.PubKey(genesisPubKey), Power: 0} - abciResponses = &ABCIResponses{ + abciResponses = &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } block = makeBlock(oldState, oldState.LastBlockHeight+1) - blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - updatedState, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) // only the first added val (not the genesis val) should be left assert.Equal(t, 11, len(updatedState.NextValidators.Validators)) @@ -758,14 +764,15 @@ func TestLargeGenesisValidator(t *testing.T) { count := 0 isProposerUnchanged := true for isProposerUnchanged { - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block = makeBlock(curState, curState.LastBlockHeight+1) - blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} - curState, err = updateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) + blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) { isProposerUnchanged = false } @@ -780,16 +787,17 @@ func TestLargeGenesisValidator(t *testing.T) { proposers := make([]*types.Validator, numVals) for i := 0; i < 100; i++ { // no updates: - abciResponses := &ABCIResponses{ + abciResponses := &sm.ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block := makeBlock(updatedState, updatedState.LastBlockHeight+1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} - updatedState, err = updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) if i > numVals { // expect proposers to cycle through after the first iteration (of numVals blocks): if proposers[i%numVals] == nil { proposers[i%numVals] = updatedState.NextValidators.Proposer @@ -806,15 +814,15 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { defer tearDown(t) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - SaveState(stateDB, state) + sm.SaveState(stateDB, state) nextHeight := state.LastBlockHeight + 1 - v0, err := LoadValidators(stateDB, nextHeight) + v0, err := sm.LoadValidators(stateDB, nextHeight) assert.Nil(t, err) acc0 := v0.Validators[0].ProposerPriority - v1, err := LoadValidators(stateDB, nextHeight+1) + v1, err := 
sm.LoadValidators(stateDB, nextHeight+1) assert.Nil(t, err) acc1 := v1.Validators[0].ProposerPriority @@ -830,28 +838,27 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - SaveState(stateDB, state) + sm.SaveState(stateDB, state) _, valOld := state.Validators.GetByIndex(0) var pubkeyOld = valOld.PubKey pubkey := ed25519.GenPrivKey().PubKey() - const height = 1 // Swap the first validator with a new one (validator set size stays the same). - header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) + header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) // Save state etc. var err error var validatorUpdates []*types.Validator validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) - state, err = updateState(state, blockID, &header, responses, validatorUpdates) + state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) // Load nextheight, it should be the oldpubkey. - v0, err := LoadValidators(stateDB, nextHeight) + v0, err := sm.LoadValidators(stateDB, nextHeight) assert.Nil(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByAddress(pubkeyOld.Address()) @@ -861,7 +868,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { } // Load nextheight+1, it should be the new pubkey. - v1, err := LoadValidators(stateDB, nextHeight+1) + v1, err := sm.LoadValidators(stateDB, nextHeight+1) assert.Nil(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByAddress(pubkey.Address()) @@ -871,14 +878,6 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { } } -func genValSet(size int) *types.ValidatorSet { - vals := make([]*types.Validator, size) - for i := 0; i < size; i++ { - vals[i] = types.NewValidator(ed25519.GenPrivKey().PubKey(), 10) - } - return types.NewValidatorSet(vals) -} - func TestStateMakeBlock(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) @@ -924,14 +923,14 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp) + header, blockID, responses := makeHeaderPartsResponsesParams(state, cp) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) - state, err = updateState(state, blockID, &header, responses, validatorUpdates) + state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) + sm.SaveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) } // Make all the test cases by using the same params until after the change. 
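For reference, the save/load pattern this test exercises, as a minimal stand-alone sketch. It assumes only the exported helpers shown in this diff (sm.SaveConsensusParamsInfo, sm.LoadConsensusParams); the test name TestConsensusParamsRoundTrip is hypothetical, and the fallback through LastHeightConsensusParamsChanged for heights without a stored entry is not shown:

```go
package state_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tendermint/libs/db"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// Params recorded as changed at height H are what LoadConsensusParams
// returns for H.
func TestConsensusParamsRoundTrip(t *testing.T) {
	stateDB := dbm.NewMemDB()
	params := types.DefaultConsensusParams()

	// changeHeight == nextHeight, so the full params are stored at height 2.
	sm.SaveConsensusParamsInfo(stateDB, 2, 2, *params)

	loaded, err := sm.LoadConsensusParams(stateDB, 2)
	require.NoError(t, err)
	assert.Equal(t, *params, loaded)
}
```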
@@ -949,27 +948,15 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { } for _, testCase := range testCases { - p, err := LoadConsensusParams(stateDB, testCase.height) + p, err := sm.LoadConsensusParams(stateDB, testCase.height) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) assert.Equal(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) } } -func makeParams(blockBytes, blockGas, evidenceAge int64) types.ConsensusParams { - return types.ConsensusParams{ - BlockSize: types.BlockSizeParams{ - MaxBytes: blockBytes, - MaxGas: blockGas, - }, - Evidence: types.EvidenceParams{ - MaxAge: evidenceAge, - }, - } -} - func TestApplyUpdates(t *testing.T) { - initParams := makeParams(1, 2, 3) + initParams := makeConsensusParams(1, 2, 3) cases := [...]struct { init types.ConsensusParams @@ -985,14 +972,14 @@ func TestApplyUpdates(t *testing.T) { MaxGas: 55, }, }, - makeParams(44, 55, 3)}, + makeConsensusParams(44, 55, 3)}, 3: {initParams, abci.ConsensusParams{ Evidence: &abci.EvidenceParams{ MaxAge: 66, }, }, - makeParams(1, 2, 66)}, + makeConsensusParams(1, 2, 66)}, } for i, tc := range cases { @@ -1000,61 +987,3 @@ func TestApplyUpdates(t *testing.T) { assert.Equal(t, tc.expected, res, "case %d", i) } } - -func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, - pubkey crypto.PubKey) (types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, state.LastBlockHeight+1) - abciResponses := &ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, - } - - // If the pubkey is new, remove the old and add the new. - _, val := state.NextValidators.GetByIndex(0) - if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { - abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - types.TM2PB.NewValidatorUpdate(val.PubKey, 0), - types.TM2PB.NewValidatorUpdate(pubkey, 10), - }, - } - } - - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} - -func makeHeaderPartsResponsesValPowerChange(state State, height int64, - power int64) (types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, state.LastBlockHeight+1) - abciResponses := &ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, - } - - // If the pubkey is new, remove the old and add the new. 
- _, val := state.NextValidators.GetByIndex(0) - if val.VotingPower != power { - abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - types.TM2PB.NewValidatorUpdate(val.PubKey, power), - }, - } - } - - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} - -func makeHeaderPartsResponsesParams(state State, height int64, - params types.ConsensusParams) (types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, state.LastBlockHeight+1) - abciResponses := &ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, - } - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} - -type paramsChangeTestCase struct { - height int64 - params types.ConsensusParams -} diff --git a/state/store.go b/state/store.go index 5938b202e..5a6b1cbf5 100644 --- a/state/store.go +++ b/state/store.go @@ -145,6 +145,12 @@ type ABCIResponses struct { BeginBlock *abci.ResponseBeginBlock } +type ABCIResponsesDeprecated struct { + DeliverTx []*abci.ResponseDeliverTxDeprecated + EndBlock *abci.ResponseEndBlockDeprecated + BeginBlock *abci.ResponseBeginBlockDeprecated +} + // NewABCIResponses returns a new ABCIResponses func NewABCIResponses(block *types.Block) *ABCIResponses { resDeliverTxs := make([]*abci.ResponseDeliverTx, block.NumTxs) @@ -179,9 +185,22 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { abciResponses := new(ABCIResponses) err := cdc.UnmarshalBinaryBare(buf, abciResponses) if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has + deprecated := new(ABCIResponsesDeprecated) + err := cdc.UnmarshalBinaryBare(buf, deprecated) + if err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has changed: %v\n`, err)) + } + var deliverTxs []*abci.ResponseDeliverTx + for _, result := range deprecated.DeliverTx { + deliverTxs = append(deliverTxs, abci.ConvertDeprecatedDeliverTxResponse(result)) + } + return &ABCIResponses{ + DeliverTx: deliverTxs, + EndBlock: abci.ConvertDeprecatedEndBlockResponse(deprecated.EndBlock), + BeginBlock: abci.ConvertDeprecatedBeginBlockResponse(deprecated.BeginBlock), + }, nil } // TODO: ensure that buf is completely read. @@ -191,7 +210,7 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { // SaveABCIResponses persists the ABCIResponses to the database. // This is useful in case we crash after app.Commit and before s.Save(). // Responses are indexed by height so they can also be loaded later to produce Merkle proofs. 
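The decode path added to LoadABCIResponses above follows a try-current-then-fallback pattern: unmarshal the current wire shape first and, only if that fails, decode the deprecated shape and convert it. A sketch of the same logic as a self-contained helper, assuming the ABCIResponsesDeprecated type and abci.ConvertDeprecated* helpers from this diff; loadWithFallback is a hypothetical name, and the real code exits the process when both decodes fail rather than returning an error:

```go
package state

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// loadWithFallback restates the fallback path in LoadABCIResponses: try the
// current encoding first and, failing that, decode the deprecated shape and
// convert it field by field.
func loadWithFallback(buf []byte) (*ABCIResponses, error) {
	res := new(ABCIResponses)
	if err := cdc.UnmarshalBinaryBare(buf, res); err == nil {
		return res, nil
	}

	dep := new(ABCIResponsesDeprecated)
	if err := cdc.UnmarshalBinaryBare(buf, dep); err != nil {
		return nil, err // data corrupted, or the spec changed again
	}

	deliverTxs := make([]*abci.ResponseDeliverTx, 0, len(dep.DeliverTx))
	for _, r := range dep.DeliverTx {
		deliverTxs = append(deliverTxs, abci.ConvertDeprecatedDeliverTxResponse(r))
	}
	return &ABCIResponses{
		DeliverTx:  deliverTxs,
		EndBlock:   abci.ConvertDeprecatedEndBlockResponse(dep.EndBlock),
		BeginBlock: abci.ConvertDeprecatedBeginBlockResponse(dep.BeginBlock),
	}, nil
}
```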
-func saveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { +func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) } diff --git a/state/store_test.go b/state/store_test.go index 916d6d417..2b70b93a6 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -1,14 +1,58 @@ -package state +package state_test import ( "fmt" "os" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cfg "github.com/tendermint/tendermint/config" dbm "github.com/tendermint/tendermint/libs/db" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" ) +func TestStoreLoadValidators(t *testing.T) { + stateDB := dbm.NewMemDB() + val, _ := types.RandValidator(true, 10) + vals := types.NewValidatorSet([]*types.Validator{val}) + + // 1) LoadValidators loads validators using a height where they were last changed + sm.SaveValidatorsInfo(stateDB, 1, 1, vals) + sm.SaveValidatorsInfo(stateDB, 2, 1, vals) + loadedVals, err := sm.LoadValidators(stateDB, 2) + require.NoError(t, err) + assert.NotZero(t, loadedVals.Size()) + + // 2) LoadValidators loads validators using a checkpoint height + + // TODO(melekes): REMOVE in 0.33 release + // https://github.com/tendermint/tendermint/issues/3543 + // for releases prior to v0.31.4, it uses last height changed + valInfo := &sm.ValidatorsInfo{ + LastHeightChanged: sm.ValSetCheckpointInterval, + } + stateDB.Set(sm.CalcValidatorsKey(sm.ValSetCheckpointInterval), valInfo.Bytes()) + assert.NotPanics(t, func() { + sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval+1, 1, vals) + loadedVals, err := sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval+1) + if err != nil { + t.Fatal(err) + } + if loadedVals.Size() == 0 { + t.Fatal("Expected validators to be non-empty") + } + }) + // ENDREMOVE + + sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + + loadedVals, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval) + require.NoError(t, err) + assert.NotZero(t, loadedVals.Size()) +} + func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 @@ -16,20 +60,20 @@ func BenchmarkLoadValidators(b *testing.B) { defer os.RemoveAll(config.RootDir) dbType := dbm.DBBackendType(config.DBBackend) stateDB := dbm.NewDB("state", dbType, config.DBDir()) - state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) if err != nil { b.Fatal(err) } state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - SaveState(stateDB, state) + sm.SaveState(stateDB, state) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... 
- saveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { for n := 0; n < b.N; n++ { - _, err := LoadValidators(stateDB, int64(i)) + _, err := sm.LoadValidators(stateDB, int64(i)) if err != nil { b.Fatal(err) } diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 52ae396bf..e7765a823 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -1,4 +1,4 @@ -package state +package state_test import ( "os" @@ -7,11 +7,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - tmtime "github.com/tendermint/tendermint/types/time" ) func TestTxFilter(t *testing.T) { @@ -34,10 +33,10 @@ func TestTxFilter(t *testing.T) { for i, tc := range testCases { stateDB := dbm.NewDB("state", "memdb", os.TempDir()) - state, err := LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) require.NoError(t, err) - f := TxPreCheck(state) + f := sm.TxPreCheck(state) if tc.isErr { assert.NotNil(t, f(tc.tx), "#%v", i) } else { @@ -45,13 +44,3 @@ func TestTxFilter(t *testing.T) { } } } - -func randomGenesisDoc() *types.GenesisDoc { - pubkey := ed25519.GenPrivKey().PubKey() - return &types.GenesisDoc{ - GenesisTime: tmtime.Now(), - ChainID: "abc", - Validators: []types.GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, - ConsensusParams: types.DefaultConsensusParams(), - } -} diff --git a/state/txindex/indexer_service_test.go b/state/txindex/indexer_service_test.go index ff3c63842..bec4f2998 100644 --- a/state/txindex/indexer_service_test.go +++ b/state/txindex/indexer_service_test.go @@ -43,14 +43,14 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Tx: types.Tx("foo"), Result: abci.ResponseDeliverTx{Code: 0}, } - eventBus.PublishEventTx(types.EventDataTx{*txResult1}) + eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) txResult2 := &types.TxResult{ Height: 1, Index: uint32(1), Tx: types.Tx("bar"), Result: abci.ResponseDeliverTx{Code: 0}, } - eventBus.PublishEventTx(types.EventDataTx{*txResult2}) + eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) time.Sleep(100 * time.Millisecond) diff --git a/state/txindex/kv/wire.go b/state/txindex/kv/codec.go similarity index 100% rename from state/txindex/kv/wire.go rename to state/txindex/kv/codec.go diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index a95723294..32e3d55cc 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -10,9 +10,10 @@ import ( "time" "github.com/pkg/errors" + + abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" @@ -77,13 +78,30 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { txResult := new(types.TxResult) err := cdc.UnmarshalBinaryBare(rawBytes, &txResult) if err != nil { - return nil, fmt.Errorf("Error reading TxResult: %v", err) + txResultDeprecated := new(types.TxResultDeprecated) + err := 
cdc.UnmarshalBinaryBare(rawBytes, &txResultDeprecated) + if err != nil { + return nil, fmt.Errorf("Error reading TxResult: %v", err) + } + result := abci.ConvertDeprecatedDeliverTxResponse(&txResultDeprecated.Result) + if result == nil { + return nil, fmt.Errorf("Error: failed to convert deprecated tx result") + } + return &types.TxResult{ + Height: txResultDeprecated.Height, + Index: txResultDeprecated.Index, + Tx: txResultDeprecated.Tx, + Result: *result, + }, nil } return txResult, nil } -// AddBatch indexes a batch of transactions using the given list of tags. +// AddBatch indexes a batch of transactions using the given list of events. Each +// key indexed from a tx's events is the respective attribute's key (eg. +// "number"; see the compatibility note in indexEvents). +// Any event with an empty type is not indexed. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() defer storeBatch.Close() @@ -91,12 +109,8 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { for _, result := range b.Ops { hash := result.Tx.Hash() - // index tx by tags - for _, tag := range result.Result.Tags { - if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) { - storeBatch.Set(keyForTag(tag, result), hash) - } - } + // index tx by events + txi.indexEvents(result, hash, storeBatch) // index tx by height if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { @@ -111,23 +125,22 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch.Set(hash, rawBytes) } - storeBatch.Write() + storeBatch.WriteSync() return nil } -// Index indexes a single transaction using the given list of tags. +// Index indexes a single transaction using the given list of events. Each key +// indexed from the tx's events is the respective attribute's key (eg. +// "number"; see the compatibility note in indexEvents). +// Any event with an empty type is not indexed. func (txi *TxIndex) Index(result *types.TxResult) error { b := txi.store.NewBatch() defer b.Close() hash := result.Tx.Hash() - // index tx by tags - for _, tag := range result.Result.Tags { - if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) { - b.Set(keyForTag(tag, result), hash) - } - } + // index tx by events + txi.indexEvents(result, hash, b) // index tx by height if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { @@ -139,12 +152,34 @@ func (txi *TxIndex) Index(result *types.TxResult) error { if err != nil { return err } + b.Set(hash, rawBytes) + b.WriteSync() - b.Write() return nil } +func (txi *TxIndex) indexEvents(result *types.TxResult, hash []byte, store dbm.SetDeleter) { + for _, event := range result.Result.Events { + // only index events with a non-empty type + if len(event.Type) == 0 { + continue + } + + for _, attr := range event.Attributes { + if len(attr.Key) == 0 { + continue + } + // Note: https://github.com/tendermint/tendermint/pull/3643 introduced a breaking change. + // We don't want to stop the world and reindex the data, so we keep the attribute key as the store key. + compositeTag := string(attr.Key) + if txi.indexAllTags || cmn.StringInSlice(compositeTag, txi.tagsToIndex) { + store.Set(keyForEvent(compositeTag, attr.Value, result), hash) + } + } + } +}
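keyForEvent (further down in this diff) lays each index entry out as attributeKey/attributeValue/height/index. A stand-alone sketch of that layout with hypothetical values; the real function takes a *types.TxResult, while here height and index are passed directly:

```go
package main

import "fmt"

// keyForEventExample mirrors the "%s/%s/%d/%d" layout used by keyForEvent.
func keyForEventExample(key string, value []byte, height int64, index uint32) []byte {
	return []byte(fmt.Sprintf("%s/%s/%d/%d", key, value, height, index))
}

func main() {
	// A tx at height 1, index 0, carrying an event attribute number=1 is
	// stored under "number/1/1/0", so the query `number = 1` becomes a
	// prefix scan over "number/1/".
	fmt.Printf("%s\n", keyForEventExample("number", []byte("1"), 1, 0))
}
```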
One special use cases here: (1) if "tx.hash" is found, it returns tx @@ -160,8 +195,8 @@ func (txi *TxIndex) Search(queryStr string) ([]*types.TxResult, error) { if err != nil { return nil, err } - var hashes [][]byte var hashesInitialized bool + filteredHashes := make(map[string][]byte) // get a list of conditions (like "tx.height > 5") conditions := q.Conditions() @@ -191,10 +226,16 @@ func (txi *TxIndex) Search(queryStr string) ([]*types.TxResult, error) { for _, r := range ranges { if !hashesInitialized { - hashes = txi.matchRange(r, startKey(r.key)) + filteredHashes = txi.matchRange(r, startKey(r.key), filteredHashes, true) hashesInitialized = true + + // Ignore any remaining conditions if the first condition resulted + // in no matches (assuming implicit AND operand). + if len(filteredHashes) == 0 { + break + } } else { - hashes = intersect(hashes, txi.matchRange(r, startKey(r.key))) + filteredHashes = txi.matchRange(r, startKey(r.key), filteredHashes, false) } } } @@ -209,21 +250,26 @@ func (txi *TxIndex) Search(queryStr string) ([]*types.TxResult, error) { } if !hashesInitialized { - hashes = txi.match(c, startKeyForCondition(c, height)) + filteredHashes = txi.match(c, startKeyForCondition(c, height), filteredHashes, true) hashesInitialized = true + + // Ignore any remaining conditions if the first condition resulted + // in no matches (assuming implicit AND operand). + if len(filteredHashes) == 0 { + break + } } else { - hashes = intersect(hashes, txi.match(c, startKeyForCondition(c, height))) + filteredHashes = txi.match(c, startKeyForCondition(c, height), filteredHashes, false) } } - results := make([]*types.TxResult, len(hashes)) - i := 0 - for _, h := range hashes { - results[i], err = txi.Get(h) + results := make([]*types.TxResult, 0, len(filteredHashes)) + for _, h := range filteredHashes { + res, err := txi.Get(h) if err != nil { return nil, errors.Wrapf(err, "failed to get Tx{%X}", h) } - i++ + results = append(results, res) } // sort by height & index by default @@ -351,63 +397,115 @@ func isRangeOperation(op query.Operator) bool { } } -func (txi *TxIndex) match(c query.Condition, startKeyBz []byte) (hashes [][]byte) { - if c.Op == query.OpEqual { +// match returns all matching txs by hash that meet a given condition and start +// key. An already filtered result (filteredHashes) is provided such that any +// non-intersecting matches are removed. +// +// NOTE: filteredHashes may be empty if no previous condition has matched. +func (txi *TxIndex) match(c query.Condition, startKeyBz []byte, filteredHashes map[string][]byte, firstRun bool) map[string][]byte { + // A previous match was attempted but resulted in no matches, so we return + // no matches (assuming AND operand). + if !firstRun && len(filteredHashes) == 0 { + return filteredHashes + } + + tmpHashes := make(map[string][]byte) + + switch { + case c.Op == query.OpEqual: it := dbm.IteratePrefix(txi.store, startKeyBz) defer it.Close() + for ; it.Valid(); it.Next() { - hashes = append(hashes, it.Value()) + tmpHashes[string(it.Value())] = it.Value() } - } else if c.Op == query.OpContains { + + case c.Op == query.OpContains: // XXX: startKey does not apply here. 
- // For example, if startKey = "account.owner/an/" and search query = "accoutn.owner CONTAINS an" + // For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" it := dbm.IteratePrefix(txi.store, startKey(c.Tag)) defer it.Close() + for ; it.Valid(); it.Next() { if !isTagKey(it.Key()) { continue } + if strings.Contains(extractValueFromKey(it.Key()), c.Operand.(string)) { - hashes = append(hashes, it.Value()) + tmpHashes[string(it.Value())] = it.Value() } } - } else { + default: panic("other operators should be handled already") } - return + + if len(tmpHashes) == 0 || firstRun { + // Either: + // + // 1. A match was attempted for the current condition but produced no + // results, so we return no matches regardless of any previous results + // (assuming AND operand). + // + // 2. A previous match was not attempted, so we return all results. + return tmpHashes + } + + // Remove/reduce matches in filteredHashes that were not found in this + // match (tmpHashes). + for k := range filteredHashes { + if tmpHashes[k] == nil { + delete(filteredHashes, k) + } + } + + return filteredHashes } -func (txi *TxIndex) matchRange(r queryRange, startKey []byte) (hashes [][]byte) { - // create a map to prevent duplicates - hashesMap := make(map[string][]byte) +// matchRange returns all matching txs by hash that meet a given queryRange and +// start key. An already filtered result (filteredHashes) is provided such that +// any non-intersecting matches are removed. +// +// NOTE: filteredHashes may be empty if no previous condition has matched. +func (txi *TxIndex) matchRange(r queryRange, startKey []byte, filteredHashes map[string][]byte, firstRun bool) map[string][]byte { + // A previous match was attempted but resulted in no matches, so we return + // no matches (assuming AND operand). + if !firstRun && len(filteredHashes) == 0 { + return filteredHashes + } + tmpHashes := make(map[string][]byte) lowerBound := r.lowerBoundValue() upperBound := r.upperBoundValue() it := dbm.IteratePrefix(txi.store, startKey) defer it.Close() + LOOP: for ; it.Valid(); it.Next() { if !isTagKey(it.Key()) { continue } - switch r.AnyBound().(type) { - case int64: + + if _, ok := r.AnyBound().(int64); ok { v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) if err != nil { continue LOOP } + include := true if lowerBound != nil && v < lowerBound.(int64) { include = false } + + if upperBound != nil && v > upperBound.(int64) { include = false } + if include { - hashesMap[fmt.Sprintf("%X", it.Value())] = it.Value() + tmpHashes[string(it.Value())] = it.Value() } + // XXX: passing time in ABCI Tags is not yet implemented // case time.Time: // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) @@ -416,13 +514,27 @@ LOOP: // } } } - hashes = make([][]byte, len(hashesMap)) - i := 0 - for _, h := range hashesMap { - hashes[i] = h - i++ + + if len(tmpHashes) == 0 || firstRun { + // Either: + // + // 1. A match was attempted for the current condition but produced no + // results, so we return no matches regardless of any previous results + // (assuming AND operand). + // + // 2. A previous match was not attempted, so we return all results. + return tmpHashes } - return + + // Remove/reduce matches in filteredHashes that were not found in this + // match (tmpHashes). + for k := range filteredHashes { + if tmpHashes[k] == nil { + delete(filteredHashes, k) + } + } + + return filteredHashes }
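The old [][]byte intersect helper (removed at the end of this file) did a pairwise quadratic comparison per condition; the rewrite instead threads a single map through every condition. The core step both match and matchRange share, distilled into one illustrative function (intersectInto is not part of the diff):

```go
package kv

// intersectInto keeps only the hashes the current condition also matched,
// mirroring the filteredHashes/tmpHashes step in match and matchRange above.
func intersectInto(filtered, tmp map[string][]byte, firstRun bool) map[string][]byte {
	// First condition: tmp seeds the running result set. An empty tmp also
	// short-circuits the whole query, since conditions are ANDed.
	if firstRun || len(tmp) == 0 {
		return tmp
	}
	// Later conditions: drop anything the current condition did not match.
	for k := range filtered {
		if tmp[k] == nil {
			delete(filtered, k)
		}
	}
	return filtered
}
```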
/////////////////////////////////////////////////////////////////////////////// @@ -437,10 +549,10 @@ func extractValueFromKey(key []byte) string { return parts[1] } -func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { +func keyForEvent(key string, value []byte, result *types.TxResult) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d", - tag.Key, - tag.Value, + key, + value, result.Height, result.Index, )) @@ -469,18 +581,3 @@ func startKey(fields ...interface{}) []byte { } return b.Bytes() } - -/////////////////////////////////////////////////////////////////////////////// -// Utils - -func intersect(as, bs [][]byte) [][]byte { - i := make([][]byte, 0, cmn.MinInt(len(as), len(bs))) - for _, a := range as { - for _, b := range bs { - if bytes.Equal(a, b) { - i = append(i, a) - } - } - } - return i -} diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go new file mode 100644 index 000000000..60685e949 --- /dev/null +++ b/state/txindex/kv/kv_bench_test.go @@ -0,0 +1,71 @@ +package kv + +import ( + "crypto/rand" + "fmt" + "io/ioutil" + "testing" + + abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" +) + +func BenchmarkTxSearch(b *testing.B) { + dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test") + if err != nil { + b.Errorf("failed to create temporary directory: %s", err) + } + + db, err := dbm.NewGoLevelDB("benchmark_tx_search_test", dbDir) + if err != nil { + b.Errorf("failed to create database: %s", err) + } + + allowedTags := []string{"address", "amount"} + indexer := NewTxIndex(db, IndexTags(allowedTags)) + + for i := 0; i < 35000; i++ { + events := []abci.Event{ + { + Type: "transfer", + Attributes: []cmn.KVPair{ + {Key: []byte("address"), Value: []byte(fmt.Sprintf("address_%d", i%100))}, + {Key: []byte("amount"), Value: []byte("50")}, + }, + }, + } + + txBz := make([]byte, 8) + if _, err := rand.Read(txBz); err != nil { + b.Errorf("failed to produce random bytes: %s", err) + } + + txResult := &types.TxResult{ + Height: int64(i), + Index: 0, + Tx: types.Tx(string(txBz)), + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Events: events, + }, + } + + if err := indexer.Index(txResult); err != nil { + b.Errorf("failed to index tx: %s", err) + } + } + + txQuery := "address = 'address_43' AND amount = 50" + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if _, err := indexer.Search(txQuery); err != nil { + b.Errorf("failed to query for txs: %s", err) + } + } +} diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 604be3932..3ecd374c0 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -11,7 +11,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" ) @@ -20,7 +19,15 @@ func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} + txResult := &types.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result:
abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, Log: "", Events: nil, + }, + } hash := tx.Hash() batch := txindex.NewBatch(1) @@ -35,7 +42,15 @@ func TestTxIndex(t *testing.T) { assert.Equal(t, txResult, loadedTxResult) tx2 := types.Tx("BYE BYE WORLD") - txResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} + txResult2 := &types.TxResult{ + Height: 1, + Index: 0, + Tx: tx2, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, Log: "", Events: nil, + }, + } hash2 := tx2.Hash() err = indexer.Index(txResult2) @@ -47,13 +62,13 @@ func TestTxIndex(t *testing.T) { } func TestTxSearch(t *testing.T) { - allowedTags := []string{"account.number", "account.owner", "account.date"} + allowedTags := []string{"number", "owner", "date"} indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags), EnableRangeQuery()) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - {Key: []byte("account.owner"), Value: []byte("Ivan")}, - {Key: []byte("not_allowed"), Value: []byte("Vlad")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("owner"), Value: []byte("Ivan")}}}, + {Type: "", Attributes: []cmn.KVPair{{Key: []byte("not_allowed"), Value: []byte("Vlad")}}}, }) hash := txResult.Tx.Hash() @@ -67,31 +82,36 @@ func TestTxSearch(t *testing.T) { // search by hash {fmt.Sprintf("tx.hash = '%X'", hash), 1}, // search by exact match (one tag) - {"account.number = 1", 1}, + {"number = 1", 1}, // search by exact match (two tags) - {"account.number = 1 AND account.owner = 'Ivan'", 1}, + {"number = 1 AND owner = 'Ivan'", 1}, // search by exact match (two tags) - {"account.number = 1 AND account.owner = 'Vlad'", 0}, + {"number = 1 AND owner = 'Vlad'", 0}, + {"owner = 'Vlad' AND number = 1", 0}, + {"number >= 1 AND owner = 'Vlad'", 0}, + {"owner = 'Vlad' AND number >= 1", 0}, + {"number <= 0", 0}, + {"number <= 0 AND owner = 'Ivan'", 0}, // search using a prefix of the stored value - {"account.owner = 'Iv'", 0}, + {"owner = 'Iv'", 0}, // search by range - {"account.number >= 1 AND account.number <= 5", 1}, + {"number >= 1 AND number <= 5", 1}, // search by range (lower bound) - {"account.number >= 1", 1}, + {"number >= 1", 1}, // search by range (upper bound) - {"account.number <= 5", 1}, + {"number <= 5", 1}, // search using not allowed tag {"not_allowed = 'boom'", 0}, // search for not existing tx result - {"account.number >= 2 AND account.number <= 5", 0}, + {"number >= 2 AND number <= 5", 0}, // search using not existing tag - {"account.date >= TIME 2013-05-03T14:45:00Z", 0}, + {"date >= TIME 2013-05-03T14:45:00Z", 0}, // search using CONTAINS - {"account.owner CONTAINS 'an'", 1}, + {"owner CONTAINS 'an'", 1}, // search for non existing value using CONTAINS - {"account.owner CONTAINS 'Vlad'", 0}, + {"owner CONTAINS 'Vlad'", 0}, // search using the wrong tag (of numeric type) using CONTAINS - {"account.number CONTAINS 'Iv'", 0}, + {"number CONTAINS 'Iv'", 0}, } for _, tc := range testCases { @@ -107,19 +127,88 @@ func TestTxSearch(t *testing.T) { } } +func TestTxSearchDeprecatedIndexing(t *testing.T) { + allowedTags := []string{"number", "sender"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags), EnableRangeQuery()) + + // index tx using events indexing (composite key) + txResult1 := 
txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, + }) + hash1 := txResult1.Tx.Hash() + + err := indexer.Index(txResult1) + require.NoError(t, err) + + // index tx also using deprecated indexing (tag as key) + txResult2 := txResultWithEvents(nil) + txResult2.Tx = types.Tx("HELLO WORLD 2") + + hash2 := txResult2.Tx.Hash() + b := indexer.store.NewBatch() + + rawBytes, err := cdc.MarshalBinaryBare(txResult2) + require.NoError(t, err) + + depKey := []byte(fmt.Sprintf("%s/%s/%d/%d", + "sender", + "addr1", + txResult2.Height, + txResult2.Index, + )) + + b.Set(depKey, hash2) + b.Set(keyForHeight(txResult2), hash2) + b.Set(hash2, rawBytes) + b.Write() + + testCases := []struct { + q string + results []*types.TxResult + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash1), []*types.TxResult{txResult1}}, + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash2), []*types.TxResult{txResult2}}, + // search by exact match (one tag) + {"number = 1", []*types.TxResult{txResult1}}, + {"number >= 1 AND number <= 5", []*types.TxResult{txResult1}}, + // search by range (lower bound) + {"number >= 1", []*types.TxResult{txResult1}}, + // search by range (upper bound) + {"number <= 5", []*types.TxResult{txResult1}}, + // search using not allowed tag + {"not_allowed = 'boom'", []*types.TxResult{}}, + // search for not existing tx result + {"number >= 2 AND number <= 5", []*types.TxResult{}}, + // search using not existing tag + {"date >= TIME 2013-05-03T14:45:00Z", []*types.TxResult{}}, + // search by deprecated tag + {"sender = 'addr1'", []*types.TxResult{txResult2}}, + } + + for _, tc := range testCases { + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(tc.q) + require.NoError(t, err) + require.Equal(t, results, tc.results) + }) + } +} + func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - allowedTags := []string{"account.number"} + allowedTags := []string{"number"} indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags), EnableRangeQuery()) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - {Key: []byte("account.number"), Value: []byte("2")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("2")}}}, }) err := indexer.Index(txResult) require.NoError(t, err) - results, err := indexer.Search("account.number >= 1") + results, err := indexer.Search("number >= 1") assert.NoError(t, err) assert.Len(t, results, 1) @@ -127,13 +216,14 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - allowedTags := []string{"account.number", "account.number.id"} + allowedTags := []string{"number", "number.id"} indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags), EnableRangeQuery()) // indexed first, but bigger height (to test the order of transactions) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, }) + txResult.Tx = types.Tx("Bob's account") txResult.Height = 2 txResult.Index = 1 @@ -141,8 +231,8 @@ func TestTxSearchMultipleTxs(t *testing.T) { require.NoError(t, err) // indexed second, but smaller height (to 
test the order of transactions) - txResult2 := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("2")}, + txResult2 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("2")}}}, }) txResult2.Tx = types.Tx("Alice's account") txResult2.Height = 1 @@ -152,8 +242,8 @@ func TestTxSearchMultipleTxs(t *testing.T) { require.NoError(t, err) // indexed third (to test the order of transactions) - txResult3 := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("3")}, + txResult3 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("3")}}}, }) txResult3.Tx = types.Tx("Jack's account") txResult3.Height = 1 @@ -163,8 +253,8 @@ func TestTxSearchMultipleTxs(t *testing.T) { // indexed fourth (to test we don't include txs with similar tags) // https://github.com/tendermint/tendermint/issues/2908 - txResult4 := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number.id"), Value: []byte("1")}, + txResult4 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number.id"), Value: []byte("1")}}}, }) txResult4.Tx = types.Tx("Mike's account") txResult4.Height = 2 @@ -172,7 +262,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { err = indexer.Index(txResult4) require.NoError(t, err) - results, err := indexer.Search("account.number >= 1") + results, err := indexer.Search("number >= 1") assert.NoError(t, err) require.Len(t, results, 3) @@ -182,45 +272,46 @@ func TestTxSearchMultipleTxs(t *testing.T) { func TestIndexAllTags(t *testing.T) { indexer := NewTxIndex(db.NewMemDB(), IndexAllTags(), EnableRangeQuery()) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.owner"), Value: []byte("Ivan")}, - {Key: []byte("account.number"), Value: []byte("1")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("owner"), Value: []byte("Ivan")}}}, + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, }) err := indexer.Index(txResult) require.NoError(t, err) - results, err := indexer.Search("account.number >= 1") + results, err := indexer.Search("number >= 1") assert.NoError(t, err) assert.Len(t, results, 1) assert.Equal(t, []*types.TxResult{txResult}, results) - results, err = indexer.Search("account.owner = 'Ivan'") + results, err = indexer.Search("owner = 'Ivan'") assert.NoError(t, err) assert.Len(t, results, 1) assert.Equal(t, []*types.TxResult{txResult}, results) } + func TestDisableRangeQuery(t *testing.T) { indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) - _, err := indexer.Search("account.number >= 1") + _, err := indexer.Search("number >= 1") assert.Error(t, err) - _, err = indexer.Search("account.number >= 1 AND account.sequence < 100 AND tx.height > 200 AND tx.height <= 300") + _, err = indexer.Search("number >= 1 AND sequence < 100 AND tx.height > 200 AND tx.height <= 300") assert.Error(t, err) } -func txResultWithTags(tags []cmn.KVPair) *types.TxResult { +func txResultWithEvents(events []abci.Event) *types.TxResult { tx := types.Tx("HELLO WORLD") return &types.TxResult{ Height: 1, Index: 0, Tx: tx, Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: tags, + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Events: events, }, } } @@ -232,7 +323,7 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { } defer os.RemoveAll(dir) // 
nolint: errcheck - store := db.NewDB("tx_index", "leveldb", dir) + store := db.NewDB("tx_index", "goleveldb", dir) indexer := NewTxIndex(store) batch := txindex.NewBatch(txsCount) @@ -244,10 +335,10 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { Index: txIndex, Tx: tx, Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: []cmn.KVPair{}, + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Events: []abci.Event{}, }, } if err := batch.Add(txResult); err != nil { diff --git a/state/validation.go b/state/validation.go index 4d8a423e4..7eaa8523c 100644 --- a/state/validation.go +++ b/state/validation.go @@ -96,10 +96,7 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block } } else { if len(block.LastCommit.Precommits) != state.LastValidators.Size() { - return fmt.Errorf("Invalid block commit size. Expected %v, got %v", - state.LastValidators.Size(), - len(block.LastCommit.Precommits), - ) + return types.NewErrInvalidCommitPrecommits(state.LastValidators.Size(), len(block.LastCommit.Precommits)) } err := state.LastValidators.VerifyCommit( state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) diff --git a/state/validation_test.go b/state/validation_test.go index ed2bf8720..65b27df80 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -1,31 +1,30 @@ -package state +package state_test import ( "testing" "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) -// TODO(#2589): -// - generalize this past the first height -// - add txs and build up full State properly -// - test block.Time (see #2587 - there are no conditions on time for the first height) -func TestValidateBlockHeader(t *testing.T) { - var height int64 = 1 // TODO(#2589): generalize - state, stateDB := state(1, int(height)) +const validationTestsStopHeight int64 = 10 - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil, true) +func TestValidateBlockHeader(t *testing.T) { + proxyApp := newTestApp() + require.NoError(t, proxyApp.Start()) + defer proxyApp.Stop() - // A good block passes. 
- block := makeBlock(state, height) - err := blockExec.ValidateBlock(state, block) - require.NoError(t, err) + state, stateDB, privVals := makeState(3, 1) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}, true) + lastCommit := types.NewCommit(types.BlockID{}, nil) // some bad values wrongHash := tmhash.Sum([]byte("this hash is wrong")) @@ -43,7 +42,7 @@ func TestValidateBlockHeader(t *testing.T) { {"Version wrong2", func(block *types.Block) { block.Version = wrongVersion2 }}, {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, {"Height wrong", func(block *types.Block) { block.Height += 10 }}, - {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 3600 * 24) }}, + {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 1) }}, {"NumTxs wrong", func(block *types.Block) { block.NumTxs += 10 }}, {"TotalTxs wrong", func(block *types.Block) { block.TotalTxs += 10 }}, @@ -62,78 +61,145 @@ func TestValidateBlockHeader(t *testing.T) { {"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }}, } - for _, tc := range testCases { - block := makeBlock(state, height) - tc.malleateBlock(block) - err := blockExec.ValidateBlock(state, block) - require.Error(t, err, tc.name) + // Build up state for multiple heights + for height := int64(1); height < validationTestsStopHeight; height++ { + proposerAddr := state.Validators.GetProposer().Address + /* + Invalid blocks don't pass + */ + for _, tc := range testCases { + block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr) + tc.malleateBlock(block) + err := blockExec.ValidateBlock(state, block) + require.Error(t, err, tc.name) + } + + /* + A good block passes + */ + var err error + state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil) + require.NoError(t, err, "height %d", height) } } -/* - TODO(#2589): - - test Block.Data.Hash() == Block.DataHash - - test len(Block.Data.Txs) == Block.NumTxs -*/ -func TestValidateBlockData(t *testing.T) { -} - -/* - TODO(#2589): - - test len(block.LastCommit.Precommits) == state.LastValidators.Size() - - test state.LastValidators.VerifyCommit -*/ func TestValidateBlockCommit(t *testing.T) { + proxyApp := newTestApp() + require.NoError(t, proxyApp.Start()) + defer proxyApp.Stop() + + state, stateDB, privVals := makeState(1, 1) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}, true) + lastCommit := types.NewCommit(types.BlockID{}, nil) + wrongPrecommitsCommit := types.NewCommit(types.BlockID{}, nil) + badPrivVal := types.NewMockPV() + + for height := int64(1); height < validationTestsStopHeight; height++ { + proposerAddr := state.Validators.GetProposer().Address + if height > 1 { + /* + #2589: ensure state.LastValidators.VerifyCommit fails here + */ + // should be height-1 instead of height + wrongHeightVote, err := types.MakeVote(height, state.LastBlockID, state.Validators, privVals[proposerAddr.String()], chainID) + require.NoError(t, err, "height %d", height) + wrongHeightCommit := types.NewCommit(state.LastBlockID, []*types.CommitSig{wrongHeightVote.CommitSig()}) + block, _ := state.MakeBlock(height, makeTxs(height), wrongHeightCommit, nil, proposerAddr) + err = blockExec.ValidateBlock(state, block) + _, isErrInvalidCommitHeight := 
err.(types.ErrInvalidCommitHeight) + require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) + + /* + #2589: test len(block.LastCommit.Precommits) == state.LastValidators.Size() + */ + block, _ = state.MakeBlock(height, makeTxs(height), wrongPrecommitsCommit, nil, proposerAddr) + err = blockExec.ValidateBlock(state, block) + _, isErrInvalidCommitPrecommits := err.(types.ErrInvalidCommitPrecommits) + require.True(t, isErrInvalidCommitPrecommits, "expected ErrInvalidCommitPrecommits at height %d but got: %v", height, err) + } + + /* + A good block passes + */ + var err error + var blockID types.BlockID + state, blockID, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil) + require.NoError(t, err, "height %d", height) + + /* + wrongPrecommitsCommit is fine except for the extra bad precommit + */ + goodVote, err := types.MakeVote(height, blockID, state.Validators, privVals[proposerAddr.String()], chainID) + require.NoError(t, err, "height %d", height) + badVote := &types.Vote{ + ValidatorAddress: badPrivVal.GetPubKey().Address(), + ValidatorIndex: 0, + Height: height, + Round: 0, + Timestamp: tmtime.Now(), + Type: types.PrecommitType, + BlockID: blockID, + } + err = badPrivVal.SignVote(chainID, badVote) + require.NoError(t, err, "height %d", height) + wrongPrecommitsCommit = types.NewCommit(blockID, []*types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()}) + } } -/* - TODO(#2589): - - test good/bad evidence in block -*/ func TestValidateBlockEvidence(t *testing.T) { - var height int64 = 1 // TODO(#2589): generalize - state, stateDB := state(1, int(height)) - - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil, true) - - // make some evidence - addr, _ := state.Validators.GetByIndex(0) - goodEvidence := types.NewMockGoodEvidence(height, 0, addr) - - // A block with a couple pieces of evidence passes. - block := makeBlock(state, height) - block.Evidence.Evidence = []types.Evidence{goodEvidence, goodEvidence} - block.EvidenceHash = block.Evidence.Hash() - err := blockExec.ValidateBlock(state, block) - require.NoError(t, err) - - // A block with too much evidence fails.
- maxBlockSize := state.ConsensusParams.BlockSize.MaxBytes - maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize) - require.True(t, maxNumEvidence > 2) - for i := int64(0); i < maxNumEvidence; i++ { - block.Evidence.Evidence = append(block.Evidence.Evidence, goodEvidence) + proxyApp := newTestApp() + require.NoError(t, proxyApp.Start()) + defer proxyApp.Stop() + + state, stateDB, privVals := makeState(3, 1) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}, true) + lastCommit := types.NewCommit(types.BlockID{}, nil) + + for height := int64(1); height < validationTestsStopHeight; height++ { + proposerAddr := state.Validators.GetProposer().Address + proposerIdx, _ := state.Validators.GetByAddress(proposerAddr) + goodEvidence := types.NewMockGoodEvidence(height, proposerIdx, proposerAddr) + if height > 1 { + /* + A block with too much evidence fails + */ + maxBlockSize := state.ConsensusParams.BlockSize.MaxBytes + maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize) + require.True(t, maxNumEvidence > 2) + evidence := make([]types.Evidence, 0) + // one more than the maximum allowed evidence + for i := int64(0); i <= maxNumEvidence; i++ { + evidence = append(evidence, goodEvidence) + } + block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr) + err := blockExec.ValidateBlock(state, block) + _, ok := err.(*types.ErrEvidenceOverflow) + require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d", height) + } + + /* + A good block with several pieces of good evidence passes + */ + maxBlockSize := state.ConsensusParams.BlockSize.MaxBytes + maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize) + require.True(t, maxNumEvidence > 2) + evidence := make([]types.Evidence, 0) + // precisely the amount of allowed evidence + for i := int64(0); i < maxNumEvidence; i++ { + evidence = append(evidence, goodEvidence) + } + + var err error + state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, evidence) + require.NoError(t, err, "height %d", height) } - block.EvidenceHash = block.Evidence.Hash() - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - _, ok := err.(*types.ErrEvidenceOverflow) - require.True(t, ok) } -// always returns true if asked if any evidence was already committed. -type mockEvPoolAlwaysCommitted struct{} - -func (m mockEvPoolAlwaysCommitted) PendingEvidence(int64) []types.Evidence { return nil } -func (m mockEvPoolAlwaysCommitted) AddEvidence(types.Evidence) error { return nil } -func (m mockEvPoolAlwaysCommitted) Update(*types.Block, State) {} -func (m mockEvPoolAlwaysCommitted) IsCommitted(types.Evidence) bool { return true } - func TestValidateFailBlockOnCommittedEvidence(t *testing.T) { var height int64 = 1 - state, stateDB := state(1, int(height)) + state, stateDB, _ := makeState(1, int(height)) - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, mockEvPoolAlwaysCommitted{}, true) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, mockEvPoolAlwaysCommitted{}, true) // A block with a couple pieces of evidence passes. 
block := makeBlock(state, height) addr, _ := state.Validators.GetByIndex(0) @@ -145,12 +211,3 @@ func TestValidateFailBlockOnCommittedEvidence(t *testing.T) { require.Error(t, err) require.IsType(t, err, &types.ErrEvidenceInvalid{}) } - -/* - TODO(#2589): - - test unmarshalling BlockParts that are too big into a Block that - (note this logic happens in the consensus, not in the validation here). - - test making blocks from the types.MaxXXX functions works/fails as expected -*/ -func TestValidateBlockSize(t *testing.T) { -} diff --git a/store/codec.go b/store/codec.go new file mode 100644 index 000000000..4895e8994 --- /dev/null +++ b/store/codec.go @@ -0,0 +1,12 @@ +package store + +import ( + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" +) + +var cdc = amino.NewCodec() + +func init() { + types.RegisterBlockAmino(cdc) +} diff --git a/blockchain/store.go b/store/store.go similarity index 89% rename from blockchain/store.go rename to store/store.go index 096c5688d..9a4abdb10 100644 --- a/blockchain/store.go +++ b/store/store.go @@ -1,12 +1,11 @@ -package blockchain +package store import ( "fmt" "sync" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/pkg/errors" dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/types" ) @@ -67,7 +66,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. - panic(cmn.ErrorWrap(err, "Error reading block")) + panic(errors.Wrap(err, "Error reading block")) } return block } @@ -83,7 +82,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { } err := cdc.UnmarshalBinaryBare(bz, part) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block part")) + panic(errors.Wrap(err, "Error reading block part")) } return part } @@ -98,7 +97,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } err := cdc.UnmarshalBinaryBare(bz, blockMeta) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block meta")) + panic(errors.Wrap(err, "Error reading block meta")) } return blockMeta } @@ -115,7 +114,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { } err := cdc.UnmarshalBinaryBare(bz, commit) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block commit")) + panic(errors.Wrap(err, "Error reading block commit")) } return commit } @@ -131,7 +130,7 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { } err := cdc.UnmarshalBinaryBare(bz, commit) if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block seen commit")) + panic(errors.Wrap(err, "Error reading block seen commit")) } return commit } @@ -144,14 +143,14 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { // most recent height. Otherwise they'd stall at H-1. func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { if block == nil { - cmn.PanicSanity("BlockStore can only save a non-nil block") + panic("BlockStore can only save a non-nil block") } height := block.Height if g, w := height, bs.Height()+1; g != w { - cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) + panic(fmt.Sprintf("BlockStore can only save contiguous blocks. 
Wanted %v, got %v", w, g)) } if !blockParts.IsComplete() { - cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets")) + panic(fmt.Sprintf("BlockStore can only save complete block part sets")) } // Save block meta @@ -195,7 +194,7 @@ func (bs *BlockStore) SetHeight(height int64) { func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { if height != bs.Height()+1 { - cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) + panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) } partBytes := cdc.MustMarshalBinaryBare(part) bs.db.Set(calcBlockPartKey(height, index), partBytes) @@ -223,6 +222,7 @@ func calcSeenCommitKey(height int64) []byte { var blockStoreKey = []byte("blockStore") +// BlockStoreStateJSON is the block store state JSON structure. type BlockStoreStateJSON struct { Height int64 `json:"height"` } @@ -231,7 +231,7 @@ type BlockStoreStateJSON struct { func (bsj BlockStoreStateJSON) Save(db dbm.DB) { bytes, err := cdc.MarshalJSON(bsj) if err != nil { - cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err)) + panic(fmt.Sprintf("Could not marshal state bytes: %v", err)) } db.SetSync(blockStoreKey, bytes) } diff --git a/blockchain/store_test.go b/store/store_test.go similarity index 98% rename from blockchain/store_test.go rename to store/store_test.go index 28ab6051a..f7d6c1cd2 100644 --- a/blockchain/store_test.go +++ b/store/store_test.go @@ -1,4 +1,4 @@ -package blockchain +package store import ( "bytes" @@ -9,10 +9,11 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cfg "github.com/tendermint/tendermint/config" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/db" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" @@ -40,7 +41,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu stateDB := dbm.NewMemDB() state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) if err != nil { - panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) + panic(errors.Wrap(err, "error constructing state from genesis file")) } return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) } } diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh index 868f8d037..a4f7c83b9 100755 --- a/test/app/counter_test.sh +++ b/test/app/counter_test.sh @@ -1,5 +1,7 @@ #! /bin/bash +export GO111MODULE=on + if [[ "$GRPC_BROADCAST_TX" == "" ]]; then GRPC_BROADCAST_TX="" fi @@ -38,7 +40,7 @@ if [[ "$GRPC_BROADCAST_TX" != "" ]]; then rm grpc_client fi echo "... building grpc_client" - go build -o grpc_client grpc_client.go + go build -mod=readonly -o grpc_client grpc_client.go fi function sendTx() { diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 1a64d4173..b39277bd9 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.10 +FROM golang:1.12 # Add testing deps for curl RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list @@ -20,7 +20,6 @@ COPY . $REPO # Install the vendored dependencies # docker caching prevents reinstall on code change! 
RUN make get_tools -RUN make get_vendor_deps # install ABCI CLI RUN make install_abci @@ -28,7 +27,12 @@ RUN make install_abci # install Tendermint RUN make install -RUN tendermint testnet --node-dir-prefix="mach" --v=4 --populate-persistent-peers=false --o=$REPO/test/p2p/data +RUN tendermint testnet \ + --config $REPO/test/docker/config-template.toml \ + --node-dir-prefix="mach" \ + --v=4 \ + --populate-persistent-peers=false \ + --o=$REPO/test/p2p/data # Now copy in the code # NOTE: this will overwrite whatever is in vendor/ diff --git a/test/docker/config-template.toml b/test/docker/config-template.toml new file mode 100644 index 000000000..a90eb7bd5 --- /dev/null +++ b/test/docker/config-template.toml @@ -0,0 +1,2 @@ +[rpc] +laddr = "tcp://0.0.0.0:26657" diff --git a/tools/build/Makefile b/tools/build/Makefile index f9384ac64..8c33ffd5d 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -64,7 +64,7 @@ build-tendermint: git-branch gopath-setup @echo "*** Building tendermint" go get -d -u github.com/tendermint/tendermint/cmd/tendermint cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_tools get_vendor_deps build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_tools build cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin @echo "*** Built tendermint" @@ -72,7 +72,7 @@ build-ethermint: git-branch gopath-setup @echo "*** Building ethermint" go get -d -u github.com/tendermint/ethermint/cmd/ethermint cd $(GOPATH)/src/github.com/tendermint/ethermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint get_vendor_deps build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint build cp $(GOPATH)/src/github.com/tendermint/ethermint/build/ethermint $(GOPATH)/bin @echo "*** Built ethermint" @@ -80,14 +80,14 @@ build-gaia: git-branch gopath-setup @echo "*** Building gaia" go get -d -u go github.com/cosmos/gaia || echo "Workaround for go downloads." cd $(GOPATH)/src/github.com/cosmos/gaia && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia get_vendor_deps install + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia install @echo "*** Built gaia" build-basecoind: git-branch gopath-setup @echo "*** Building basecoind from cosmos-sdk" go get -d -u github.com/cosmos/cosmos-sdk/examples/basecoin/cmd/basecoind cd $(GOPATH)/src/github.com/cosmos/cosmos-sdk && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk get_tools get_vendor_deps build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk get_tools build cp $(GOPATH)/src/github.com/cosmos/cosmos-sdk/build/basecoind $(GOPATH)/bin/basecoind @echo "*** Built basecoind from cosmos-sdk" diff --git a/tools/tm-bench/Dockerfile.dev b/tools/tm-bench/Dockerfile.dev index 469bb8150..1151965a2 100644 --- a/tools/tm-bench/Dockerfile.dev +++ b/tools/tm-bench/Dockerfile.dev @@ -9,4 +9,3 @@ RUN make get_tools COPY . 
/go/src/github.com/tendermint/tendermint/tools/tm-bench -RUN make get_vendor_deps diff --git a/tools/tm-bench/README.md b/tools/tm-bench/README.md index b4e8cec5a..d5ed1231f 100644 --- a/tools/tm-bench/README.md +++ b/tools/tm-bench/README.md @@ -100,6 +100,5 @@ Each of the connections is handled via two separate goroutines. ## Development ``` -make get_vendor_deps make test ``` diff --git a/tools/tm-monitor/Dockerfile.dev b/tools/tm-monitor/Dockerfile.dev index 5bfbbfd5a..e593bf89c 100644 --- a/tools/tm-monitor/Dockerfile.dev +++ b/tools/tm-monitor/Dockerfile.dev @@ -9,4 +9,3 @@ RUN make get_tools COPY . /go/src/github.com/tendermint/tools/tm-monitor -RUN make get_vendor_deps diff --git a/tools/tm-monitor/Makefile b/tools/tm-monitor/Makefile index 901b0a14d..a71eb2642 100644 --- a/tools/tm-monitor/Makefile +++ b/tools/tm-monitor/Makefile @@ -36,7 +36,7 @@ dist: build-all build-docker: rm -f ./tm-monitor - docker run -it --rm -v "$(PWD)/../../:/go/src/github.com/tendermint/tendermint" -w "/go/src/github.com/tendermint/tendermint/tools/tm-monitor" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-monitor + docker run -it --rm -v "$(PWD)/../../:/go/src/github.com/tendermint/tendermint" -w "/go/src/github.com/tendermint/tendermint/tools/tm-monitor" -e "GO111MODULE=on" -e "CGO_ENABLED=0" golang:1.12 go build -ldflags "-s -w" -o tm-monitor docker build -t "tendermint/monitor" . clean: diff --git a/tools/tm-monitor/README.md b/tools/tm-monitor/README.md index 374a56b0a..2bd367b99 100644 --- a/tools/tm-monitor/README.md +++ b/tools/tm-monitor/README.md @@ -87,6 +87,5 @@ websocket. ``` make get_tools -make get_vendor_deps make test ``` diff --git a/tools/tm-monitor/wire.go b/tools/tm-monitor/codec.go similarity index 100% rename from tools/tm-monitor/wire.go rename to tools/tm-monitor/codec.go diff --git a/tools/tm-monitor/mock/eventmeter.go b/tools/tm-monitor/mock/eventmeter.go index 7bbedc7fa..7119c4399 100644 --- a/tools/tm-monitor/mock/eventmeter.go +++ b/tools/tm-monitor/mock/eventmeter.go @@ -54,7 +54,7 @@ func (c *RpcClient) Call(method string, params map[string]interface{}, result in } rv, rt := reflect.ValueOf(result), reflect.TypeOf(result) - rv, rt = rv.Elem(), rt.Elem() + rv, _ = rv.Elem(), rt.Elem() rv.Set(reflect.ValueOf(s)) return s, nil diff --git a/tools/tm-monitor/monitor/wire.go b/tools/tm-monitor/monitor/codec.go similarity index 100% rename from tools/tm-monitor/monitor/wire.go rename to tools/tm-monitor/monitor/codec.go diff --git a/tools/tm-monitor/monitor/network.go b/tools/tm-monitor/monitor/network.go index bb5dd0baa..4d85d7ed6 100644 --- a/tools/tm-monitor/monitor/network.go +++ b/tools/tm-monitor/monitor/network.go @@ -163,11 +163,12 @@ func (n *Network) updateHealth() { // TODO: make sure they're all at the same height (within a block) // and all proposing (and possibly validating ) Alternatively, just // check there hasn't been a new round in numValidators rounds - if n.NumValidators != 0 && n.NumNodesMonitoredOnline == n.NumValidators { + switch { + case n.NumValidators != 0 && n.NumNodesMonitoredOnline == n.NumValidators: n.Health = FullHealth - } else if n.NumNodesMonitoredOnline > 0 && n.NumNodesMonitoredOnline <= n.NumNodesMonitored { + case n.NumNodesMonitoredOnline > 0 && n.NumNodesMonitoredOnline <= n.NumNodesMonitored: n.Health = ModerateHealth - } else { + default: n.Health = Dead } } diff --git a/tools/tm-monitor/rpc.go b/tools/tm-monitor/rpc.go index 4412e6e0b..42cc23075 100644 --- a/tools/tm-monitor/rpc.go +++ b/tools/tm-monitor/rpc.go @@ 
-5,9 +5,11 @@ import ( "net" "net/http" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + "github.com/tendermint/tendermint/libs/log" rpc "github.com/tendermint/tendermint/rpc/lib/server" - monitor "github.com/tendermint/tendermint/tools/tm-monitor/monitor" + "github.com/tendermint/tendermint/tools/tm-monitor/monitor" ) func startRPC(listenAddr string, m *monitor.Monitor, logger log.Logger) net.Listener { @@ -41,33 +43,33 @@ func routes(m *monitor.Monitor) map[string]*rpc.RPCFunc { } // RPCStatus returns common statistics for the network and statistics per node. -func RPCStatus(m *monitor.Monitor) interface{} { - return func() (networkAndNodes, error) { +func RPCStatus(m *monitor.Monitor) func(*rpctypes.Context) (networkAndNodes, error) { + return func(_ *rpctypes.Context) (networkAndNodes, error) { return networkAndNodes{m.Network, m.Nodes}, nil } } // RPCNetworkStatus returns common statistics for the network. -func RPCNetworkStatus(m *monitor.Monitor) interface{} { - return func() (*monitor.Network, error) { +func RPCNetworkStatus(m *monitor.Monitor) func(*rpctypes.Context) (*monitor.Network, error) { + return func(_ *rpctypes.Context) (*monitor.Network, error) { return m.Network, nil } } // RPCNodeStatus returns statistics for the given node. -func RPCNodeStatus(m *monitor.Monitor) interface{} { - return func(name string) (*monitor.Node, error) { +func RPCNodeStatus(m *monitor.Monitor) func(*rpctypes.Context, string) (*monitor.Node, error) { + return func(_ *rpctypes.Context, name string) (*monitor.Node, error) { if i, n := m.NodeByName(name); i != -1 { return n, nil } - return nil, errors.New("Cannot find node with that name") + return nil, errors.New("cannot find node with that name") } } // RPCMonitor allows one to dynamically add an endpoint under the monitor. Safe // to call multiple times. -func RPCMonitor(m *monitor.Monitor) interface{} { - return func(endpoint string) (*monitor.Node, error) { +func RPCMonitor(m *monitor.Monitor) func(*rpctypes.Context, string) (*monitor.Node, error) { + return func(_ *rpctypes.Context, endpoint string) (*monitor.Node, error) { i, n := m.NodeByName(endpoint) if i == -1 { n = monitor.NewNode(endpoint) @@ -80,13 +82,13 @@ func RPCMonitor(m *monitor.Monitor) interface{} { } // RPCUnmonitor removes the given endpoint from under the monitor. -func RPCUnmonitor(m *monitor.Monitor) interface{} { - return func(endpoint string) (bool, error) { +func RPCUnmonitor(m *monitor.Monitor) func(*rpctypes.Context, string) (bool, error) { + return func(_ *rpctypes.Context, endpoint string) (bool, error) { if i, n := m.NodeByName(endpoint); i != -1 { m.Unmonitor(n) return true, nil } - return false, errors.New("Cannot find node with that name") + return false, errors.New("cannot find node with that name") } } diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 7fefdfb42..216cf6851 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -49,7 +49,7 @@ var _ error = (*TestHarnessError)(nil) // with this version of Tendermint.
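// The tm-monitor/rpc.go hunk above swaps interface{} closures for concrete
// function types whose first parameter is *rpctypes.Context, so the rpc/lib
// server can type-check handlers at registration. A new endpoint would follow
// the same shape; this handler is hypothetical, for illustration only:
func RPCPing(m *monitor.Monitor) func(*rpctypes.Context) (string, error) {
	return func(_ *rpctypes.Context) (string, error) {
		return "pong", nil
	}
}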
type TestHarness struct { addr string - spv *privval.SignerValidatorEndpoint + signerClient *privval.SignerClient fpv *privval.FilePV chainID string acceptRetries int @@ -101,14 +101,19 @@ func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, err } logger.Info("Loaded genesis file", "chainID", st.ChainID) - spv, err := newTestHarnessSocketVal(logger, cfg) + spv, err := newTestHarnessListener(logger, cfg) + if err != nil { + return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") + } + + signerClient, err := privval.NewSignerClient(spv) if err != nil { return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") } return &TestHarness{ addr: cfg.BindAddr, - spv: spv, + signerClient: signerClient, fpv: fpv, chainID: st.ChainID, acceptRetries: cfg.AcceptRetries, @@ -135,9 +140,11 @@ func (th *TestHarness) Run() { th.logger.Info("Starting test harness") accepted := false var startErr error + for acceptRetries := th.acceptRetries; acceptRetries > 0; acceptRetries-- { th.logger.Info("Attempting to accept incoming connection", "acceptRetries", acceptRetries) - if err := th.spv.Start(); err != nil { + + if err := th.signerClient.WaitForConnection(10 * time.Millisecond); err != nil { // if it wasn't a timeout error if _, ok := err.(timeoutError); !ok { th.logger.Error("Failed to start listener", "err", err) @@ -149,6 +156,7 @@ func (th *TestHarness) Run() { } startErr = err } else { + th.logger.Info("Accepted external connection") accepted = true break } @@ -182,8 +190,8 @@ func (th *TestHarness) Run() { func (th *TestHarness) TestPublicKey() error { th.logger.Info("TEST: Public key of remote signer") th.logger.Info("Local", "pubKey", th.fpv.GetPubKey()) - th.logger.Info("Remote", "pubKey", th.spv.GetPubKey()) - if th.fpv.GetPubKey() != th.spv.GetPubKey() { + th.logger.Info("Remote", "pubKey", th.signerClient.GetPubKey()) + if th.fpv.GetPubKey() != th.signerClient.GetPubKey() { th.logger.Error("FAILED: Local and remote public keys do not match") return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") } @@ -211,7 +219,7 @@ func (th *TestHarness) TestSignProposal() error { Timestamp: time.Now(), } propBytes := prop.SignBytes(th.chainID) - if err := th.spv.SignProposal(th.chainID, prop); err != nil { + if err := th.signerClient.SignProposal(th.chainID, prop); err != nil { th.logger.Error("FAILED: Signing of proposal", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } @@ -222,7 +230,7 @@ func (th *TestHarness) TestSignProposal() error { return newTestHarnessError(ErrTestSignProposalFailed, err, "") } // now validate the signature on the proposal - if th.spv.GetPubKey().VerifyBytes(propBytes, prop.Signature) { + if th.signerClient.GetPubKey().VerifyBytes(propBytes, prop.Signature) { th.logger.Info("Successfully validated proposal signature") } else { th.logger.Error("FAILED: Proposal signature validation failed") @@ -255,7 +263,7 @@ func (th *TestHarness) TestSignVote() error { } voteBytes := vote.SignBytes(th.chainID) // sign the vote - if err := th.spv.SignVote(th.chainID, vote); err != nil { + if err := th.signerClient.SignVote(th.chainID, vote); err != nil { th.logger.Error("FAILED: Signing of vote", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } @@ -266,7 +274,7 @@ func (th *TestHarness) TestSignVote() error { return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } // now validate the signature on the proposal - if 
th.spv.GetPubKey().VerifyBytes(voteBytes, vote.Signature) { + if th.signerClient.GetPubKey().VerifyBytes(voteBytes, vote.Signature) { th.logger.Info("Successfully validated vote signature", "type", voteType) } else { th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) @@ -301,10 +309,9 @@ func (th *TestHarness) Shutdown(err error) { }() } - if th.spv.IsRunning() { - if err := th.spv.Stop(); err != nil { - th.logger.Error("Failed to cleanly stop listener: %s", err.Error()) - } + err = th.signerClient.Close() + if err != nil { + th.logger.Error("Failed to cleanly stop listener", "err", err) } if th.exitWhenComplete { @@ -312,9 +319,8 @@ } } -// newTestHarnessSocketVal creates our client instance which we will use for -// testing. -func newTestHarnessSocketVal(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerValidatorEndpoint, error) { +// newTestHarnessListener creates the listener endpoint through which the test harness talks to the remote signer. +func newTestHarnessListener(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerListenerEndpoint, error) { proto, addr := cmn.ProtocolAndAddress(cfg.BindAddr) if proto == "unix" { // make sure the socket doesn't exist - if so, try to delete it @@ -329,7 +335,7 @@ func newTestHarnessSocketVal(logger log.Logger, cfg TestHarnessConfig) (*privval if err != nil { return nil, err } - logger.Info("Listening at", "proto", proto, "addr", addr) + logger.Info("Listening", "proto", proto, "addr", addr) var svln net.Listener switch proto { case "unix": @@ -347,7 +353,7 @@ func newTestHarnessSocketVal(logger log.Logger, cfg TestHarnessConfig) (*privval logger.Error("Unsupported protocol (must be unix:// or tcp://)", "proto", proto) return nil, newTestHarnessError(ErrInvalidParameters, nil, fmt.Sprintf("Unsupported protocol: %s", proto)) } - return privval.NewSignerValidatorEndpoint(logger, svln), nil + return privval.NewSignerListenerEndpoint(logger, svln), nil } func newTestHarnessError(code int, err error, info string) *TestHarnessError { diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index adb818b0b..9d444ef5f 100644 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -3,19 +3,18 @@ package internal import ( "fmt" "io/ioutil" - "net" "os" "testing" "time" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" ) const ( @@ -84,8 +83,8 @@ func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) { func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, th, th.fpv.Key.PrivKey, false, false) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, false) }, NoError, ) } @@ -94,8 +93,8 @@ func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t,
th, ed25519.GenPrivKey(), false, false) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, ed25519.GenPrivKey(), false, false) }, ErrTestPublicKeyFailed, ) @@ -104,8 +103,8 @@ func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { func TestRemoteSignerProposalSigningFailed(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, th, th.fpv.Key.PrivKey, true, false) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, th.fpv.Key.PrivKey, true, false) }, ErrTestSignProposalFailed, ) @@ -114,28 +113,30 @@ func TestRemoteSignerProposalSigningFailed(t *testing.T) { func TestRemoteSignerVoteSigningFailed(t *testing.T) { harnessTest( t, - func(th *TestHarness) *privval.SignerServiceEndpoint { - return newMockRemoteSigner(t, th, th.fpv.Key.PrivKey, false, true) + func(th *TestHarness) *privval.SignerServer { + return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, true) }, ErrTestSignVoteFailed, ) } -func newMockRemoteSigner(t *testing.T, th *TestHarness, privKey crypto.PrivKey, breakProposalSigning bool, breakVoteSigning bool) *privval.SignerServiceEndpoint { - return privval.NewSignerServiceEndpoint( +func newMockSignerServer(t *testing.T, th *TestHarness, privKey crypto.PrivKey, breakProposalSigning bool, breakVoteSigning bool) *privval.SignerServer { + mockPV := types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning) + + dialerEndpoint := privval.NewSignerDialerEndpoint( th.logger, - th.chainID, - types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning), privval.DialTCPFn( th.addr, time.Duration(defaultConnDeadline)*time.Millisecond, ed25519.GenPrivKey(), ), ) + + return privval.NewSignerServer(dialerEndpoint, th.chainID, mockPV) } // For running relatively standard tests. -func harnessTest(t *testing.T, rsMaker func(th *TestHarness) *privval.SignerServiceEndpoint, expectedExitCode int) { +func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.SignerServer, expectedExitCode int) { cfg := makeConfig(t, 100, 3) defer cleanup(cfg) @@ -147,10 +148,10 @@ func harnessTest(t *testing.T, rsMaker func(th *TestHarness) *privval.SignerServ th.Run() }() - rs := rsMaker(th) - require.NoError(t, rs.Start()) - assert.True(t, rs.IsRunning()) - defer rs.Stop() + ss := signerServerMaker(th) + require.NoError(t, ss.Start()) + assert.True(t, ss.IsRunning()) + defer ss.Stop() <-donec assert.Equal(t, expectedExitCode, th.exitCode) @@ -158,7 +159,7 @@ func harnessTest(t *testing.T, rsMaker func(th *TestHarness) *privval.SignerServ func makeConfig(t *testing.T, acceptDeadline, acceptRetries int) TestHarnessConfig { return TestHarnessConfig{ - BindAddr: testFreeTCPAddr(t), + BindAddr: privval.GetFreeLocalhostAddrPort(), KeyFile: makeTempFile("tm-testharness-keyfile", keyFileContents), StateFile: makeTempFile("tm-testharness-statefile", stateFileContents), GenesisFile: makeTempFile("tm-testharness-genesisfile", genesisFileContents), @@ -190,12 +191,3 @@ func makeTempFile(name, content string) string { } return tempFile.Name() } - -// testFreeTCPAddr claims a free port so we don't block on listener being ready. 
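// A consolidated sketch of the privval wiring these harness changes
// introduce. Every constructor and method named here appears in this diff;
// listener setup and error handling are simplified, so treat it as a sketch
// rather than runnable test code:
func wireSignerPair(logger log.Logger, bindAddr, chainID string, pv types.PrivValidator) {
	// Harness side: listen, then drive the remote signer through a SignerClient.
	ln, _ := net.Listen("tcp", bindAddr)
	endpoint := privval.NewSignerListenerEndpoint(logger, ln)
	client, _ := privval.NewSignerClient(endpoint)

	// Signer side: dial back to the harness and serve signing requests.
	dialer := privval.NewSignerDialerEndpoint(logger,
		privval.DialTCPFn(bindAddr, 100*time.Millisecond, ed25519.GenPrivKey()))
	ss := privval.NewSignerServer(dialer, chainID, pv)
	_ = ss.Start()
	defer ss.Stop()

	_ = client.WaitForConnection(10 * time.Millisecond)
	logger.Info("Remote signer", "pubKey", client.GetPubKey())
	_ = client.Close()
}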
-func testFreeTCPAddr(t *testing.T) string { - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer ln.Close() - - return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) -} diff --git a/types/block.go b/types/block.go index bcaf0725c..18356417e 100644 --- a/types/block.go +++ b/types/block.go @@ -41,25 +41,6 @@ type Block struct { LastCommit *Commit `json:"last_commit"` } -// MakeBlock returns a new block with an empty header, except what can be -// computed from itself. -// It populates the same set of fields validated by ValidateBasic. -func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { - block := &Block{ - Header: Header{ - Height: height, - NumTxs: int64(len(txs)), - }, - Data: Data{ - Txs: txs, - }, - Evidence: EvidenceData{Evidence: evidence}, - LastCommit: lastCommit, - } - block.fillHeader() - return block -} - // ValidateBasic performs basic validation that doesn't involve state data. // It checks the internal consistency of the block. // Further validation is done using state#ValidateBlock. @@ -198,7 +179,7 @@ func (b *Block) Hash() cmn.HexBytes { b.mtx.Lock() defer b.mtx.Unlock() - if b == nil || b.LastCommit == nil { + if b.LastCommit == nil { return nil } b.fillHeader() @@ -500,6 +481,8 @@ func (cs *CommitSig) toVote() *Vote { return &v } +//------------------------------------- + // Commit contains the evidence that a block was committed by a set of validators. // NOTE: Commit is empty for height 1, but never nil. type Commit struct { @@ -528,15 +511,61 @@ func NewCommit(blockID BlockID, precommits []*CommitSig) *Commit { } } +// Construct a VoteSet from the Commit and validator set. Panics +// if precommits from the commit can't be added to the voteset. +// Inverse of VoteSet.MakeCommit(). +func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet { + height, round, typ := commit.Height(), commit.Round(), PrecommitType + voteSet := NewVoteSet(chainID, height, round, typ, vals) + for idx, precommit := range commit.Precommits { + if precommit == nil { + continue + } + added, err := voteSet.AddVote(commit.GetVote(idx)) + if !added || err != nil { + panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) + } + } + return voteSet +} + +// GetVote converts the CommitSig for the given valIdx to a Vote. +// Returns nil if the precommit at valIdx is nil. +// Panics if valIdx >= commit.Size(). +func (commit *Commit) GetVote(valIdx int) *Vote { + commitSig := commit.Precommits[valIdx] + if commitSig == nil { + return nil + } + + // NOTE: this commitSig might be for a nil blockID, + // so we can't just use commit.BlockID here. + // For #1648, CommitSig will need to indicate what BlockID it's for ! + blockID := commitSig.BlockID + commit.memoizeHeightRound() + return &Vote{ + Type: PrecommitType, + Height: commit.height, + Round: commit.round, + BlockID: blockID, + Timestamp: commitSig.Timestamp, + ValidatorAddress: commitSig.ValidatorAddress, + ValidatorIndex: valIdx, + Signature: commitSig.Signature, + } +} + // VoteSignBytes constructs the SignBytes for the given CommitSig. // The only unique part of the SignBytes is the Timestamp - all other fields // signed over are otherwise the same for all validators. -func (commit *Commit) VoteSignBytes(chainID string, cs *CommitSig) []byte { - return commit.ToVote(cs).SignBytes(chainID) +// Panics if valIdx >= commit.Size(). 
+func (commit *Commit) VoteSignBytes(chainID string, valIdx int) []byte { + return commit.GetVote(valIdx).SignBytes(chainID) } // memoizeHeightRound memoizes the height and round of the commit using // the first non-nil vote. +// Should be called before any attempt to access `commit.height` or `commit.round`. func (commit *Commit) memoizeHeightRound() { if len(commit.Precommits) == 0 { return @@ -553,14 +582,6 @@ func (commit *Commit) memoizeHeightRound() { } } -// ToVote converts a CommitSig to a Vote. -// If the CommitSig is nil, the Vote will be nil. -func (commit *Commit) ToVote(cs *CommitSig) *Vote { - // TODO: use commit.validatorSet to reconstruct vote - // and deprecate .toVote - return cs.toVote() -} - // Height returns the height of the commit func (commit *Commit) Height() int64 { commit.memoizeHeightRound() @@ -602,8 +623,8 @@ func (commit *Commit) BitArray() *cmn.BitArray { // GetByIndex returns the vote corresponding to a given validator index. // Panics if `index >= commit.Size()`. // Implements VoteSetReader. -func (commit *Commit) GetByIndex(index int) *Vote { - return commit.ToVote(commit.Precommits[index]) +func (commit *Commit) GetByIndex(valIdx int) *Vote { + return commit.GetVote(valIdx) } // IsCommit returns true if there is at least one vote. @@ -727,7 +748,7 @@ func (sh SignedHeader) ValidateBasic(chainID string) error { // ValidateBasic on the Commit. err := sh.Commit.ValidateBasic() if err != nil { - return cmn.ErrorWrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + return errors.Wrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") } return nil } diff --git a/types/block_test.go b/types/block_test.go index 75b5c19dd..716229bbc 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -342,3 +342,115 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { } } } + +func TestCommitToVoteSet(t *testing.T) { + lastID := makeBlockIDRandom() + h := int64(3) + + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + assert.NoError(t, err) + + chainID := voteSet.ChainID() + voteSet2 := CommitToVoteSet(chainID, commit, valSet) + + for i := 0; i < len(vals); i++ { + vote1 := voteSet.GetByIndex(i) + vote2 := voteSet2.GetByIndex(i) + vote3 := commit.GetVote(i) + + vote1bz := cdc.MustMarshalBinaryBare(vote1) + vote2bz := cdc.MustMarshalBinaryBare(vote2) + vote3bz := cdc.MustMarshalBinaryBare(vote3) + assert.Equal(t, vote1bz, vote2bz) + assert.Equal(t, vote1bz, vote3bz) + } +} + +func TestSignedHeaderValidateBasic(t *testing.T) { + commit := randCommit() + chainID := "𠜎" + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + h := Header{ + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: chainID, + Height: commit.Height(), + Time: timestamp, + NumTxs: math.MaxInt64, + TotalTxs: math.MaxInt64, + LastBlockID: commit.BlockID, + LastCommitHash: commit.Hash(), + DataHash: commit.Hash(), + ValidatorsHash: commit.Hash(), + NextValidatorsHash: commit.Hash(), + ConsensusHash: commit.Hash(), + AppHash: commit.Hash(), + LastResultsHash: commit.Hash(), + EvidenceHash: commit.Hash(), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + } + + validSignedHeader := SignedHeader{Header: &h, Commit: commit} + validSignedHeader.Commit.BlockID.Hash = validSignedHeader.Hash() + invalidSignedHeader := SignedHeader{} + + testCases := []struct { + testName string + shHeader *Header + shCommit *Commit 
+ expectErr bool + }{ + {"Valid Signed Header", validSignedHeader.Header, validSignedHeader.Commit, false}, + {"Invalid Signed Header", invalidSignedHeader.Header, validSignedHeader.Commit, true}, + {"Invalid Signed Header", validSignedHeader.Header, invalidSignedHeader.Commit, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + sh := SignedHeader{ + Header: tc.shHeader, + Commit: tc.shCommit, + } + assert.Equal(t, tc.expectErr, sh.ValidateBasic(validSignedHeader.Header.ChainID) != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBlockIDValidateBasic(t *testing.T) { + validBlockID := BlockID{ + Hash: cmn.HexBytes{}, + PartsHeader: PartSetHeader{ + Total: 1, + Hash: cmn.HexBytes{}, + }, + } + + invalidBlockID := BlockID{ + Hash: []byte{0}, + PartsHeader: PartSetHeader{ + Total: -1, + Hash: cmn.HexBytes{}, + }, + } + + testCases := []struct { + testName string + blockIDHash cmn.HexBytes + blockIDPartsHeader PartSetHeader + expectErr bool + }{ + {"Valid BlockID", validBlockID.Hash, validBlockID.PartsHeader, false}, + {"Invalid BlockID", invalidBlockID.Hash, validBlockID.PartsHeader, true}, + {"Invalid BlockID", validBlockID.Hash, invalidBlockID.PartsHeader, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + blockID := BlockID{ + Hash: tc.blockIDHash, + PartsHeader: tc.blockIDPartsHeader, + } + assert.Equal(t, tc.expectErr, blockID.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/types/wire.go b/types/codec.go similarity index 100% rename from types/wire.go rename to types/codec.go diff --git a/types/errors.go b/types/errors.go new file mode 100644 index 000000000..603ac51d7 --- /dev/null +++ b/types/errors.go @@ -0,0 +1,41 @@ +package types + +import "fmt" + +type ( + // ErrInvalidCommitHeight is returned when we encounter a commit with an + // unexpected height. + ErrInvalidCommitHeight struct { + Expected int64 + Actual int64 + } + + // ErrInvalidCommitPrecommits is returned when we encounter a commit where + // the number of precommits doesn't match the number of validators. 
+ ErrInvalidCommitPrecommits struct { + Expected int + Actual int + } +) + +func NewErrInvalidCommitHeight(expected, actual int64) ErrInvalidCommitHeight { + return ErrInvalidCommitHeight{ + Expected: expected, + Actual: actual, + } +} + +func (e ErrInvalidCommitHeight) Error() string { + return fmt.Sprintf("Invalid commit -- wrong height: %v vs %v", e.Expected, e.Actual) +} + +func NewErrInvalidCommitPrecommits(expected, actual int) ErrInvalidCommitPrecommits { + return ErrInvalidCommitPrecommits{ + Expected: expected, + Actual: actual, + } +} + +func (e ErrInvalidCommitPrecommits) Error() string { + return fmt.Sprintf("Invalid commit -- wrong set size: %v vs %v", e.Expected, e.Actual) +} diff --git a/types/event_bus.go b/types/event_bus.go index 9a9d34b3e..17191b520 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" @@ -90,20 +91,32 @@ func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error func (b *EventBus) Publish(eventType string, eventData TMEventData) error { // no explicit deadline for publishing events ctx := context.Background() - b.pubsub.PublishWithTags(ctx, eventData, map[string]string{EventTypeKey: eventType}) - return nil -} - -func (b *EventBus) validateAndStringifyTags(tags []cmn.KVPair, logger log.Logger) map[string]string { - result := make(map[string]string) - for _, tag := range tags { - // basic validation - if len(tag.Key) == 0 { - logger.Debug("Got tag with an empty key (skipping)", "tag", tag) + return b.pubsub.PublishWithEvents(ctx, eventData, map[string][]string{EventTypeKey: {eventType}}) +} + +// validateAndStringifyEvents takes a slice of event objects and creates a +// map of stringified events where each key is composed of the event +// type and each of the event's attribute keys in the form of +// "{event.Type}.{attribute.Key}" and the value is each attribute's value. +func (b *EventBus) validateAndStringifyEvents(events []types.Event, logger log.Logger) map[string][]string { + result := make(map[string][]string) + for _, event := range events { + if len(event.Type) == 0 { + logger.Debug("Got an event with an empty type (skipping)", "event", event) continue } - result[string(tag.Key)] = string(tag.Value) + + for _, attr := range event.Attributes { + if len(attr.Key) == 0 { + logger.Debug("Got an event attribute with an empty key (skipping)", "event", event) + continue + } + + compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) + result[compositeTag] = append(result[compositeTag], string(attr.Value)) + } } + return result } @@ -111,31 +124,27 @@ func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { // no explicit deadline for publishing events ctx := context.Background() - resultTags := append(data.ResultBeginBlock.Tags, data.ResultEndBlock.Tags...) - tags := b.validateAndStringifyTags(resultTags, b.Logger.With("block", data.Block.StringShort())) + resultEvents := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...)
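// To illustrate the "{event.Type}.{attribute.Key}" scheme documented on
// validateAndStringifyEvents above: an input event such as
//
//	abci.Event{Type: "transfer", Attributes: []cmn.KVPair{{Key: []byte("sender"), Value: []byte("foo")}}}
//
// is flattened to the map entry
//
//	events["transfer.sender"] = []string{"foo"}
//
// which is what lets a subscriber match it with a query like
// transfer.sender='foo' (see the event_bus tests further down).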
+ events := b.validateAndStringifyEvents(resultEvents, b.Logger.With("block", data.Block.StringShort())) - // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventNewBlock + // add predefined new block event + events[EventTypeKey] = append(events[EventTypeKey], EventNewBlock) - b.pubsub.PublishWithTags(ctx, data, tags) - return nil + return b.pubsub.PublishWithEvents(ctx, data, events) } func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { // no explicit deadline for publishing events ctx := context.Background() - resultTags := append(data.ResultBeginBlock.Tags, data.ResultEndBlock.Tags...) + resultTags := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) // TODO: Create StringShort method for Header and use it in logger. - tags := b.validateAndStringifyTags(resultTags, b.Logger.With("header", data.Header)) + events := b.validateAndStringifyEvents(resultTags, b.Logger.With("header", data.Header)) - // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventNewBlockHeader + // add predefined new block header event + events[EventTypeKey] = append(events[EventTypeKey], EventNewBlockHeader) - b.pubsub.PublishWithTags(ctx, data, tags) - return nil + return b.pubsub.PublishWithEvents(ctx, data, events) } func (b *EventBus) PublishEventVote(data EventDataVote) error { @@ -153,20 +162,14 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { // no explicit deadline for publishing events ctx := context.Background() - tags := b.validateAndStringifyTags(data.Result.Tags, b.Logger.With("tx", data.Tx)) + events := b.validateAndStringifyEvents(data.Result.Events, b.Logger.With("tx", data.Tx)) // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventTx - - logIfTagExists(TxHashKey, tags, b.Logger) - tags[TxHashKey] = fmt.Sprintf("%X", data.Tx.Hash()) - - logIfTagExists(TxHeightKey, tags, b.Logger) - tags[TxHeightKey] = fmt.Sprintf("%d", data.Height) + events[EventTypeKey] = append(events[EventTypeKey], EventTx) + events[TxHashKey] = append(events[TxHashKey], fmt.Sprintf("%X", data.Tx.Hash())) + events[TxHeightKey] = append(events[TxHeightKey], fmt.Sprintf("%d", data.Height)) - b.pubsub.PublishWithTags(ctx, data, tags) - return nil + return b.pubsub.PublishWithEvents(ctx, data, events) } func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { @@ -213,12 +216,6 @@ func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd return b.Publish(EventValidatorSetUpdates, data) } -func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { - if value, ok := tags[tag]; ok { - logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) - } -} - //----------------------------------------------------------------------------- type NopEventBus struct{} diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 508b423a6..45590217f 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -22,10 +22,15 @@ func TestEventBusPublishEventTx(t *testing.T) { defer eventBus.Stop() tx := Tx("foo") - result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}}, + }, + } // PublishEventTx adds all these 3 tags, so the 
query below should work - query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND baz=1", tx.Hash()) + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) require.NoError(t, err) @@ -62,11 +67,19 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { defer eventBus.Stop() block := MakeBlock(0, []Tx{}, nil, []Evidence{}) - resultBeginBlock := abci.ResponseBeginBlock{Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} - resultEndBlock := abci.ResponseEndBlock{Tags: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}}, + }, + } // PublishEventNewBlock adds the tm.event tag, so the query below should work - query := "tm.event='NewBlock' AND baz=1 AND foz=2" + query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" blocksSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) require.NoError(t, err) @@ -94,6 +107,106 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } } +func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + + tx := Tx("foo") + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + { + Type: "transfer", + Attributes: []cmn.KVPair{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("5")}, + }, + }, + { + Type: "transfer", + Attributes: []cmn.KVPair{ + {Key: []byte("sender"), Value: []byte("baz")}, + {Key: []byte("recipient"), Value: []byte("cat")}, + {Key: []byte("amount"), Value: []byte("13")}, + }, + }, + { + Type: "withdraw.rewards", + Attributes: []cmn.KVPair{ + {Key: []byte("address"), Value: []byte("bar")}, + {Key: []byte("source"), Value: []byte("iceman")}, + {Key: []byte("amount"), Value: []byte("33")}, + }, + }, + }, + } + + testCases := []struct { + query string + expectResults bool + }{ + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='DoesNotExist'", + false, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", + false, + }, + } + + for i, tc := range testCases { + sub, err := eventBus.Subscribe(context.Background(), fmt.Sprintf("client-%d", i), tmquery.MustParse(tc.query)) + require.NoError(t, err) + + done := make(chan struct{}) + + go func() { + msg := <-sub.Out() + data := msg.Data().(EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.Equal(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + close(done) + }() + + err = eventBus.PublishEventTx(EventDataTx{TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }}) + assert.NoError(t, err) + + select { + case <-done: + if !tc.expectResults { + require.Fail(t, "unexpected 
transaction result(s) from subscription") + } + case <-time.After(1 * time.Second): + if tc.expectResults { + require.Fail(t, "failed to receive a transaction after 1 second") + } + } + } +} + func TestEventBusPublishEventNewBlockHeader(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() @@ -101,11 +214,19 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { defer eventBus.Stop() block := MakeBlock(0, []Tx{}, nil, []Evidence{}) - resultBeginBlock := abci.ResponseBeginBlock{Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} - resultEndBlock := abci.ResponseEndBlock{Tags: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}}, + }, + } // PublishEventNewBlockHeader adds the tm.event tag, so the query below should work - query := "tm.event='NewBlockHeader' AND baz=1 AND foz=2" + query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" headersSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) require.NoError(t, err) diff --git a/types/evidence_test.go b/types/evidence_test.go index 1f1338cad..fc97ae409 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -157,3 +157,13 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { }) } } + +func TestMockGoodEvidenceValidateBasic(t *testing.T) { + goodEvidence := NewMockGoodEvidence(int64(1), 1, []byte{1}) + assert.Nil(t, goodEvidence.ValidateBasic()) +} + +func TestMockBadEvidenceValidateBasic(t *testing.T) { + badEvidence := MockBadEvidence{MockGoodEvidence: NewMockGoodEvidence(int64(1), 1, []byte{1})} + assert.Nil(t, badEvidence.ValidateBasic()) +} diff --git a/types/genesis.go b/types/genesis.go index 54b81e9e2..94680bca8 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -7,6 +7,8 @@ import ( "io/ioutil" "time" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" tmtime "github.com/tendermint/tendermint/types/time" @@ -64,26 +66,24 @@ func (genDoc *GenesisDoc) ValidatorHash() []byte { // and fills in defaults for optional fields left empty func (genDoc *GenesisDoc) ValidateAndComplete() error { if genDoc.ChainID == "" { - return cmn.NewError("Genesis doc must include non-empty chain_id") + return errors.New("Genesis doc must include non-empty chain_id") } if len(genDoc.ChainID) > MaxChainIDLen { - return cmn.NewError("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen) + return errors.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen) } if genDoc.ConsensusParams == nil { genDoc.ConsensusParams = DefaultConsensusParams() - } else { - if err := genDoc.ConsensusParams.Validate(); err != nil { - return err - } + } else if err := genDoc.ConsensusParams.Validate(); err != nil { + return err } for i, v := range genDoc.Validators { if v.Power == 0 { - return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v) + return errors.Errorf("The genesis file cannot contain validators with no voting power: %v", v) } if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) { - return cmn.NewError("Incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address()) 
+ return errors.Errorf("Incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address()) } if len(v.Address) == 0 { genDoc.Validators[i].Address = v.PubKey.Address() @@ -119,11 +119,11 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { jsonBlob, err := ioutil.ReadFile(genDocFile) if err != nil { - return nil, cmn.ErrorWrap(err, "Couldn't read GenesisDoc file") + return nil, errors.Wrap(err, "Couldn't read GenesisDoc file") } genDoc, err := GenesisDocFromJSON(jsonBlob) if err != nil { - return nil, cmn.ErrorWrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile)) + return nil, errors.Wrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile)) } return genDoc, nil } diff --git a/types/genesis_test.go b/types/genesis_test.go index 0e81187e3..14eb01a32 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -68,7 +68,7 @@ func TestGenesisGood(t *testing.T) { genDoc.ConsensusParams.BlockSize.MaxBytes = 0 genDocBytes, err = cdc.MarshalJSON(genDoc) assert.NoError(t, err, "error marshalling genDoc") - genDoc, err = GenesisDocFromJSON(genDocBytes) + _, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") // Genesis doc from raw json diff --git a/types/part_set.go b/types/part_set.go index 4533fb759..389db7a0b 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -226,7 +226,7 @@ func (ps *PartSet) IsComplete() bool { func (ps *PartSet) GetReader() io.Reader { if !ps.IsComplete() { - cmn.PanicSanity("Cannot GetReader() on incomplete PartSet") + panic("Cannot GetReader() on incomplete PartSet") } return NewPartSetReader(ps.parts) } diff --git a/types/priv_validator.go b/types/priv_validator.go index ca0e5ed15..ab5e8ef1b 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -11,7 +11,7 @@ import ( // PrivValidator defines the functionality of a local Tendermint validator // that signs votes and proposals, and never double signs. type PrivValidator interface { - GetAddress() Address + // TODO: Extend the interface to return errors too. 
Issue: https://github.com/tendermint/tendermint/issues/3602 GetPubKey() crypto.PubKey SignVote(chainID string, vote *Vote) error @@ -28,7 +28,7 @@ func (pvs PrivValidatorsByAddress) Len() int { } func (pvs PrivValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(pvs[i].GetAddress(), pvs[j].GetAddress()) == -1 + return bytes.Compare(pvs[i].GetPubKey().Address(), pvs[j].GetPubKey().Address()) == -1 } func (pvs PrivValidatorsByAddress) Swap(i, j int) { diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 64caa3f4c..833c7dc38 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -90,7 +90,7 @@ func TestABCIHeader(t *testing.T) { height, numTxs, []byte("lastCommitHash"), []byte("dataHash"), []byte("evidenceHash"), ) - protocolVersion := version.Consensus{7, 8} + protocolVersion := version.Consensus{Block: 7, App: 8} timestamp := time.Now() lastBlockID := BlockID{ Hash: []byte("hash"), diff --git a/types/results.go b/types/results.go index d7d82d894..6b0c37562 100644 --- a/types/results.go +++ b/types/results.go @@ -41,7 +41,7 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult { } } -// Bytes serializes the ABCIResponse using wire +// Bytes serializes the ABCIResponse using amino func (a ABCIResults) Bytes() []byte { bz, err := cdc.MarshalBinaryLengthPrefixed(a) if err != nil { diff --git a/types/test_util.go b/types/test_util.go index 18e472148..d226fd99e 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -5,8 +5,7 @@ import ( ) func MakeCommit(blockID BlockID, height int64, round int, - voteSet *VoteSet, - validators []PrivValidator) (*Commit, error) { + voteSet *VoteSet, validators []PrivValidator) (*Commit, error) { // all sign for i := 0; i < len(validators); i++ { @@ -37,3 +36,40 @@ func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bo } return voteSet.AddVote(vote) } + +func MakeVote(height int64, blockID BlockID, valSet *ValidatorSet, privVal PrivValidator, chainID string) (*Vote, error) { + addr := privVal.GetPubKey().Address() + idx, _ := valSet.GetByAddress(addr) + vote := &Vote{ + ValidatorAddress: addr, + ValidatorIndex: idx, + Height: height, + Round: 0, + Timestamp: tmtime.Now(), + Type: PrecommitType, + BlockID: blockID, + } + if err := privVal.SignVote(chainID, vote); err != nil { + return nil, err + } + return vote, nil +} + +// MakeBlock returns a new block with an empty header, except what can be +// computed from itself. +// It populates the same set of fields validated by ValidateBasic. +func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { + block := &Block{ + Header: Header{ + Height: height, + NumTxs: int64(len(txs)), + }, + Data: Data{ + Txs: txs, + }, + Evidence: EvidenceData{Evidence: evidence}, + LastCommit: lastCommit, + } + block.fillHeader() + return block +} diff --git a/types/tx.go b/types/tx.go index 0c6845a7d..9cc787b34 100644 --- a/types/tx.go +++ b/types/tx.go @@ -122,6 +122,14 @@ type TxResult struct { Result abci.ResponseDeliverTx `json:"result"` } +// One usage is indexing transaction results. +type TxResultDeprecated struct { + Height int64 `json:"height"` + Index uint32 `json:"index"` + Tx Tx `json:"tx"` + Result abci.ResponseDeliverTxDeprecated `json:"result"` +} + // ComputeAminoOverhead calculates the overhead for amino encoding a transaction. 
// The overhead consists of varint encoding the field number and the wire type // (= length-delimited = 2), and another varint encoding the length of the diff --git a/types/validator.go b/types/validator.go index 325d20f5c..20069ff9a 100644 --- a/types/validator.go +++ b/types/validator.go @@ -41,19 +41,20 @@ func (v *Validator) CompareProposerPriority(other *Validator) *Validator { if v == nil { return other } - if v.ProposerPriority > other.ProposerPriority { + switch { + case v.ProposerPriority > other.ProposerPriority: return v - } else if v.ProposerPriority < other.ProposerPriority { + case v.ProposerPriority < other.ProposerPriority: return other - } else { + default: result := bytes.Compare(v.Address, other.Address) - if result < 0 { + switch { + case result < 0: return v - } else if result > 0 { + case result > 0: return other - } else { - cmn.PanicSanity("Cannot compare identical validators") - return nil + default: + panic("Cannot compare identical validators") } } } diff --git a/types/validator_set.go b/types/validator_set.go index 36ce67f06..33636d092 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -121,7 +121,7 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) { ratio := (diff + diffMax - 1) / diffMax if diff > diffMax { for _, val := range vals.Validators { - val.ProposerPriority = val.ProposerPriority / ratio + val.ProposerPriority /= ratio } } } @@ -525,7 +525,7 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) { // The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet(). func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error { - if len(changes) <= 0 { + if len(changes) == 0 { return nil } @@ -594,10 +594,10 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i return err } if vals.Size() != len(commit.Precommits) { - return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits)) + return NewErrInvalidCommitPrecommits(vals.Size(), len(commit.Precommits)) } if height != commit.Height() { - return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) + return NewErrInvalidCommitHeight(height, commit.Height()) } if !blockID.Equals(commit.BlockID) { return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v", @@ -612,17 +612,18 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i } _, val := vals.GetByIndex(idx) // Validate signature. - precommitSignBytes := commit.VoteSignBytes(chainID, precommit) + precommitSignBytes := commit.VoteSignBytes(chainID, idx) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } // Good precommit! if blockID.Equals(precommit.BlockID) { talliedVotingPower += val.VotingPower - } else { - // It's OK that the BlockID doesn't match. We include stray - // precommits to measure validator availability. } + // else { + // It's OK that the BlockID doesn't match. We include stray + // precommits to measure validator availability. + // } } if talliedVotingPower > vals.TotalVotingPower()*2/3 { @@ -689,24 +690,25 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } // See if this validator is in oldVals. 
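// A minimal sketch of the +2/3 rule enforced a few lines below: with integer
// division, a TotalVotingPower() of 10 yields a threshold of 10*2/3 = 6, so
// at least 7 units of tallied power are required before the commit verifies.
func hasTwoThirdsMajority(tallied, total int64) bool {
	return tallied > total*2/3
}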
diff --git a/types/validator.go b/types/validator.go
index 325d20f5c..20069ff9a 100644
--- a/types/validator.go
+++ b/types/validator.go
@@ -41,19 +41,20 @@ func (v *Validator) CompareProposerPriority(other *Validator) *Validator {
 	if v == nil {
 		return other
 	}
-	if v.ProposerPriority > other.ProposerPriority {
+	switch {
+	case v.ProposerPriority > other.ProposerPriority:
 		return v
-	} else if v.ProposerPriority < other.ProposerPriority {
+	case v.ProposerPriority < other.ProposerPriority:
 		return other
-	} else {
+	default:
 		result := bytes.Compare(v.Address, other.Address)
-		if result < 0 {
+		switch {
+		case result < 0:
 			return v
-		} else if result > 0 {
+		case result > 0:
 			return other
-		} else {
-			cmn.PanicSanity("Cannot compare identical validators")
-			return nil
+		default:
+			panic("Cannot compare identical validators")
 		}
 	}
 }
diff --git a/types/validator_set.go b/types/validator_set.go
index 36ce67f06..33636d092 100644
--- a/types/validator_set.go
+++ b/types/validator_set.go
@@ -121,7 +121,7 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) {
 	ratio := (diff + diffMax - 1) / diffMax
 	if diff > diffMax {
 		for _, val := range vals.Validators {
-			val.ProposerPriority = val.ProposerPriority / ratio
+			val.ProposerPriority /= ratio
 		}
 	}
 }
@@ -525,7 +525,7 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
 
 // The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet().
 func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error {
-	if len(changes) <= 0 {
+	if len(changes) == 0 {
 		return nil
 	}
 
@@ -594,10 +594,10 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
 		return err
 	}
 	if vals.Size() != len(commit.Precommits) {
-		return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits))
+		return NewErrInvalidCommitPrecommits(vals.Size(), len(commit.Precommits))
 	}
 	if height != commit.Height() {
-		return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
+		return NewErrInvalidCommitHeight(height, commit.Height())
 	}
 	if !blockID.Equals(commit.BlockID) {
 		return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v",
@@ -612,17 +612,18 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
 		}
 		_, val := vals.GetByIndex(idx)
 		// Validate signature.
-		precommitSignBytes := commit.VoteSignBytes(chainID, precommit)
+		precommitSignBytes := commit.VoteSignBytes(chainID, idx)
 		if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
 			return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
 		}
 		// Good precommit!
 		if blockID.Equals(precommit.BlockID) {
 			talliedVotingPower += val.VotingPower
-		} else {
-			// It's OK that the BlockID doesn't match. We include stray
-			// precommits to measure validator availability.
 		}
+		// else {
+		// It's OK that the BlockID doesn't match. We include stray
+		// precommits to measure validator availability.
+		// }
 	}
 
 	if talliedVotingPower > vals.TotalVotingPower()*2/3 {
@@ -689,24 +690,25 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
 			return cmn.NewError("Invalid commit -- not precommit @ index %v", idx)
 		}
 		// See if this validator is in oldVals.
-		idx, val := oldVals.GetByAddress(precommit.ValidatorAddress)
-		if val == nil || seen[idx] {
+		oldIdx, val := oldVals.GetByAddress(precommit.ValidatorAddress)
+		if val == nil || seen[oldIdx] {
 			continue // missing or double vote...
 		}
-		seen[idx] = true
+		seen[oldIdx] = true
 		// Validate signature.
-		precommitSignBytes := commit.VoteSignBytes(chainID, precommit)
+		precommitSignBytes := commit.VoteSignBytes(chainID, idx)
 		if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
 			return cmn.NewError("Invalid commit -- invalid signature: %v", precommit)
 		}
 		// Good precommit!
 		if blockID.Equals(precommit.BlockID) {
 			oldVotingPower += val.VotingPower
-		} else {
-			// It's OK that the BlockID doesn't match. We include stray
-			// precommits to measure validator availability.
 		}
+		// else {
+		// It's OK that the BlockID doesn't match. We include stray
+		// precommits to measure validator availability.
+		// }
 	}
 
 	if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {
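The NewErrInvalidCommitPrecommits and NewErrInvalidCommitHeight constructors that replace the fmt.Errorf calls are defined elsewhere in the change set and are not shown in this hunk. A plausible shape, inferred only from the call sites above (expected value first, actual second), would be:

    package types

    import "fmt"

    // Assumed shape; field names and messages are guesses based on the
    // fmt.Errorf strings these constructors replace.
    type ErrInvalidCommitHeight struct {
        Expected int64
        Actual   int64
    }

    func NewErrInvalidCommitHeight(expected, actual int64) ErrInvalidCommitHeight {
        return ErrInvalidCommitHeight{Expected: expected, Actual: actual}
    }

    func (e ErrInvalidCommitHeight) Error() string {
        return fmt.Sprintf("Invalid commit -- wrong height: %v vs %v", e.Expected, e.Actual)
    }

    type ErrInvalidCommitPrecommits struct {
        Expected int
        Actual   int
    }

    func NewErrInvalidCommitPrecommits(expected, actual int) ErrInvalidCommitPrecommits {
        return ErrInvalidCommitPrecommits{Expected: expected, Actual: actual}
    }

    func (e ErrInvalidCommitPrecommits) Error() string {
        return fmt.Sprintf("Invalid commit -- wrong set size: %v vs %v", e.Expected, e.Actual)
    }

Typed errors let callers distinguish the two failure modes with a type assertion instead of matching on message strings, which is presumably the motivation for the change.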
diff --git a/types/vote.go b/types/vote.go
index ad05d688d..c105312e7 100644
--- a/types/vote.go
+++ b/types/vote.go
@@ -93,7 +93,7 @@ func (vote *Vote) String() string {
 	case PrecommitType:
 		typeString = "Precommit"
 	default:
-		cmn.PanicSanity("Unknown vote type")
+		panic("Unknown vote type")
 	}
 
 	return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X @ %s}",
@@ -101,7 +101,7 @@
 		cmn.Fingerprint(vote.ValidatorAddress),
 		vote.Height,
 		vote.Round,
-		vote.Type,
+		byte(vote.Type),
 		typeString,
 		cmn.Fingerprint(vote.BlockID.Hash),
 		cmn.Fingerprint(vote.Signature),
diff --git a/types/vote_set.go b/types/vote_set.go
index 81b6a0c8c..8aac8a074 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -70,7 +70,7 @@ type VoteSet struct {
 // Constructs a new VoteSet struct used to accumulate votes for given height/round.
 func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, valSet *ValidatorSet) *VoteSet {
 	if height == 0 {
-		cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.")
+		panic("Cannot make VoteSet for height == 0, doesn't make sense.")
 	}
 	return &VoteSet{
 		chainID: chainID,
@@ -145,7 +145,7 @@ func (voteSet *VoteSet) Copy() *VoteSet {
 // NOTE: Vote must not be nil
 func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) {
 	if voteSet == nil {
-		cmn.PanicSanity("AddVote() on nil VoteSet")
+		panic("AddVote() on nil VoteSet")
 	}
 	voteSet.mtx.Lock()
 	defer voteSet.mtx.Unlock()
@@ -211,7 +211,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
 			return added, NewConflictingVoteError(val, conflicting, vote)
 		}
 		if !added {
-			cmn.PanicSanity("Expected to add non-conflicting vote")
+			panic("Expected to add non-conflicting vote")
 		}
 		return added, nil
 	}
@@ -235,7 +235,7 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower
 	// Already exists in voteSet.votes?
 	if existing := voteSet.votes[valIndex]; existing != nil {
 		if existing.BlockID.Equals(vote.BlockID) {
-			cmn.PanicSanity("addVerifiedVote does not expect duplicate votes")
+			panic("addVerifiedVote does not expect duplicate votes")
 		} else {
 			conflicting = existing
 		}
@@ -305,7 +305,7 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower
 // NOTE: VoteSet must not be nil
 func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error {
 	if voteSet == nil {
-		cmn.PanicSanity("SetPeerMaj23() on nil VoteSet")
+		panic("SetPeerMaj23() on nil VoteSet")
 	}
 	voteSet.mtx.Lock()
 	defer voteSet.mtx.Unlock()
@@ -378,7 +378,7 @@ func (voteSet *VoteSet) GetByAddress(address []byte) *Vote {
 	defer voteSet.mtx.Unlock()
 	valIndex, val := voteSet.valSet.GetByAddress(address)
 	if val == nil {
-		cmn.PanicSanity("GetByAddress(address) returned nil")
+		panic("GetByAddress(address) returned nil")
 	}
 	return voteSet.votes[valIndex]
 }
@@ -543,16 +543,19 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) {
 //--------------------------------------------------------------------------------
 // Commit
 
+// MakeCommit constructs a Commit from the VoteSet.
+// Panics if the vote type is not PrecommitType or if
+// there's no +2/3 votes for a single block.
 func (voteSet *VoteSet) MakeCommit() *Commit {
 	if voteSet.type_ != PrecommitType {
-		cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is PrecommitType")
+		panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType")
 	}
 	voteSet.mtx.Lock()
 	defer voteSet.mtx.Unlock()
 
 	// Make sure we have a 2/3 majority
 	if voteSet.maj23 == nil {
-		cmn.PanicSanity("Cannot MakeCommit() unless a blockhash has +2/3")
+		panic("Cannot MakeCommit() unless a blockhash has +2/3")
 	}
 
 	// For every validator, get the precommit
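The recurring cmn.PanicSanity-to-panic substitutions in validator.go, vote.go, and vote_set.go above are behavior-preserving: as far as we can tell, PanicSanity was only a thin formatting wrapper over the built-in, roughly:

    package common

    import "fmt"

    // Approximate shape of the removed libs/common helper; treat the exact
    // message prefix as an assumption rather than a verbatim quote.
    func PanicSanity(v interface{}) {
        panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v))
    }

Since every call site passed a plain string, a direct panic(...) loses only the wrapper's message prefix.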
diff --git a/types/vote_set_test.go b/types/vote_set_test.go
index 59205efc6..2e217e940 100644
--- a/types/vote_set_test.go
+++ b/types/vote_set_test.go
@@ -4,9 +4,10 @@ import (
 	"bytes"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/tendermint/tendermint/crypto"
 	cmn "github.com/tendermint/tendermint/libs/common"
-	tst "github.com/tendermint/tendermint/libs/test"
 	tmtime "github.com/tendermint/tendermint/types/time"
 )
 
@@ -490,7 +491,7 @@ func TestMakeCommit(t *testing.T) {
 	}
 
 	// MakeCommit should fail.
-	tst.AssertPanics(t, "Doesn't have +2/3 majority", func() { voteSet.MakeCommit() })
+	assert.Panics(t, func() { voteSet.MakeCommit() }, "Doesn't have +2/3 majority")
 
 	// 7th voted for some other block.
 	{
diff --git a/types/vote_test.go b/types/vote_test.go
index af8a9625b..46373d61f 100644
--- a/types/vote_test.go
+++ b/types/vote_test.go
@@ -7,6 +7,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	amino "github.com/tendermint/go-amino"
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
@@ -254,13 +255,13 @@ func TestMaxVoteBytes(t *testing.T) {
 
 func TestVoteString(t *testing.T) {
 	str := examplePrecommit().String()
-	expected := `Vote{56789:6AF1F4111082 12345/02/2(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
+	expected := `Vote{56789:6AF1F4111082 12345/02/precommit(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
 	if str != expected {
 		t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str)
 	}
 
 	str2 := examplePrevote().String()
-	expected = `Vote{56789:6AF1F4111082 12345/02/1(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
+	expected = `Vote{56789:6AF1F4111082 12345/02/prevote(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
 	if str2 != expected {
 		t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str2)
 	}
diff --git a/version/version.go b/version/version.go
index a7559d499..e1c01c4c2 100644
--- a/version/version.go
+++ b/version/version.go
@@ -20,10 +20,10 @@ const (
 	// Must be a string because scripts like dist.sh read this file.
 	// XXX: Don't change the name of this variable or you will break
 	// automation :)
-	TMCoreSemVer = "0.31.5"
+	TMCoreSemVer = "0.32.3"
 
 	// ABCISemVer is the semantic version of the ABCI library
-	ABCISemVer = "0.16.0"
+	ABCISemVer = "0.16.1"
 
 	ABCIVersion = ABCISemVer
 )
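Note the argument-order flip in the vote_set_test.go migration above: the old tst.AssertPanics helper took the message first, while testify's assert.Panics takes the function under test first and the failure message last. A minimal, self-contained illustration (the names here are invented for the example):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestPanicsIllustration(t *testing.T) {
        boom := func() { panic("no +2/3 majority") }
        // Signature: assert.Panics(t, f, msgAndArgs...)
        assert.Panics(t, boom, "expected MakeCommit-style code to panic")
    }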