diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b48354f014..286370b90f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,4 +7,7 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @ebuchman @cmwaters @tychoish @williambanfield @creachadair +* @liamsi @evan-forbes @Wondertan + +# Overrides for tooling packages +docs/ @liamsi @adlerjohn diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index e99d578800..46f6d3c5cf 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -10,15 +10,7 @@ Be ready for followup questions, and please respond in a timely manner. We might ask you to provide additional logs and data (tendermint & app). --> -**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source): - - -**ABCI app** (name for built-in, URL for self-written if it's publicly available): - -**Environment**: -- **OS** (e.g. from /etc/os-release): -- **Install tools**: -- **Others**: +**Version** (use `git rev-parse --verify HEAD`): **What happened**: @@ -27,16 +19,14 @@ manner. We might ask you to provide additional logs and data (tendermint & app). **What you expected to happen**: -**Have you tried the latest version**: yes/no - **How to reproduce it** (as minimally and precisely as possible): **Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. 
containing more of the log file)**: -**Config (you can paste only the changes you've made)**: +**Config (you can paste only the changes you've made to the config)**: **node command runtime flags**: **Please provide the output from the `http://:/dump_consensus_state` RPC endpoint for consensus bugs** -**Anything else we need to know**: +**Anything else**: diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 62c3e4f3aa..0000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: Feature Request -about: Create a proposal to request a feature - ---- - - - -## Summary - - - -## Problem Definition - - - -## Proposal - - - -____ - -#### For Admin Use - -- [ ] Not duplicate issue -- [ ] Appropriate labels applied -- [ ] Appropriate contributors tagged -- [ ] Contributor assigned/self-assigned diff --git a/.github/ISSUE_TEMPLATE/implement-feature.md b/.github/ISSUE_TEMPLATE/implement-feature.md new file mode 100644 index 0000000000..cac1cf69ed --- /dev/null +++ b/.github/ISSUE_TEMPLATE/implement-feature.md @@ -0,0 +1,45 @@ +--- +name: Actionable implementation task +about: A well-defined, already decided-on, actionable implementation task. 
+ +--- + +## Summary + + + +## Details + + + +### Action Items + +- [ ] sub-task 1 + + +### References + + + diff --git a/.github/ISSUE_TEMPLATE/proposal.md b/.github/ISSUE_TEMPLATE/proposal.md new file mode 100644 index 0000000000..612dd9b4dd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/proposal.md @@ -0,0 +1,35 @@ +--- +name: proposal +about: Propose an improvement, feature, or change to core components + +--- + + + +## Summary + + + +## Problem Definition + + + +## Proposal + + + +### Implementation + + + + + + diff --git a/.github/ISSUE_TEMPLATE/write-adr.md b/.github/ISSUE_TEMPLATE/write-adr.md new file mode 100644 index 0000000000..fe3360eb65 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/write-adr.md @@ -0,0 +1,43 @@ +--- +name: ADR +about: Write an ADR for certain aspect of the system + +--- + +## Summary + + + +## Details + + + + + + + +## References and Context + + diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b6729552d2..22036cf683 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -20,8 +20,5 @@ updates: interval: daily time: "11:00" open-pull-requests-limit: 10 - reviewers: - - melekes - - tessr labels: - T:dependencies diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ce6958eabc..5a40447604 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -58,25 +58,26 @@ jobs: shell: bash if: "env.GIT_DIFF != ''" - test_apps: - runs-on: ubuntu-latest - needs: build - timeout-minutes: 5 - steps: - - uses: actions/setup-go@v3 - with: - go-version: "1.17" - - uses: actions/checkout@v3 - - uses: technote-space/get-diff-action@v6 - with: - PATTERNS: | - **/**.go - go.mod - go.sum - - name: install - run: make install install_abci - if: "env.GIT_DIFF != ''" - - name: test_apps - run: test/app/test.sh - shell: bash - if: "env.GIT_DIFF != ''" + # TODO: re-enable this test after upgrading to v0.36.x + # test_apps: + # runs-on: ubuntu-latest + # needs: build + # timeout-minutes: 5 + # 
steps: + # - uses: actions/setup-go@v3 + # with: + # go-version: "1.17" + # - uses: actions/checkout@v3 + # - uses: technote-space/get-diff-action@v6 + # with: + # PATTERNS: | + # **/**.go + # go.mod + # go.sum + # - name: install + # run: make install install_abci + # if: "env.GIT_DIFF != ''" + # - name: test_apps + # run: test/app/test.sh + # shell: bash + # if: "env.GIT_DIFF != ''" diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-35x.yml similarity index 93% rename from .github/workflows/e2e-nightly-34x.yml rename to .github/workflows/e2e-nightly-35x.yml index 71b9c8182f..35cfda86ea 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -27,7 +27,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: 'v0.34.x' + ref: 'v0.35.x' - name: Build working-directory: test/e2e @@ -56,7 +56,7 @@ jobs: SLACK_USERNAME: Nightly E2E Tests SLACK_ICON_EMOJI: ':skull:' SLACK_COLOR: danger - SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x + SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x SLACK_FOOTER: '' e2e-nightly-success: # may turn this off once they seem to pass consistently @@ -72,5 +72,5 @@ jobs: SLACK_USERNAME: Nightly E2E Tests SLACK_ICON_EMOJI: ':white_check_mark:' SLACK_COLOR: good - SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x + SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x SLACK_FOOTER: '' diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 491363d661..505e40779c 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -12,7 +12,7 @@ on: jobs: e2e-test: runs-on: ubuntu-latest - timeout-minutes: 15 + timeout-minutes: 25 steps: - uses: actions/setup-go@v3 with: diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index ec560069df..356d4d550c 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -12,6 +12,7 @@ on: paths: - "**.md" - "**.yml" + - "**.yaml" jobs: build: @@ -20,11 +21,14 @@ jobs: 
steps: - name: Checkout Code uses: actions/checkout@v3 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 - name: Lint Code Base uses: docker://github/super-linter:v4 env: VALIDATE_ALL_CODEBASE: true - DEFAULT_BRANCH: master + DEFAULT_BRANCH: v0.35.x-celestia GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VALIDATE_MD: true VALIDATE_OPENAPI: true diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 0465bec09c..55b69f940c 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,5 +1,5 @@ # stage 1 Generate Tendermint Binary -FROM golang:1.16-alpine as builder +FROM golang:1.17-alpine as builder RUN apk update && \ apk upgrade && \ apk --no-cache add make diff --git a/Makefile b/Makefile index c9836956b3..29aeefa5f3 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,8 @@ BUILDDIR ?= $(CURDIR)/build BUILD_TAGS?=tendermint +IMAGE := ghcr.io/tendermint/docker-build-proto:latest + # If building a release, please checkout the version tag to get the correct version setting ifneq ($(shell git symbolic-ref -q --short HEAD),) VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD) @@ -15,7 +17,7 @@ LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" HTTPS_GIT := https://github.com/tendermint/tendermint.git BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto -BASE_BRANCH := v0.35.x +BASE_BRANCH := v0.35.x-celestia DOCKER_PROTO := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE) CGO_ENABLED ?= 0 diff --git a/README.md b/README.md index 0fc06a7ade..365067139f 100644 --- a/README.md +++ b/README.md @@ -1,146 +1,70 @@ -# Tendermint +# Celestia Core -![banner](docs/tendermint-core-image.jpg) + + + -[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) -[State Machines](https://en.wikipedia.org/wiki/State_machine_replication). 
-Or [Blockchain](), for short. +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/celestiaorg/celestia-core) +[![Community](https://img.shields.io/badge/chat%20on-discord-orange?&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/YsnTPcSfWQ) +[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/celestiaorg/celestia-core/blob/v0.35.x-celestia/LICENSE) -[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) -[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint) -[![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm) -[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/cosmosnetwork) -[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) -[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) -[![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge) +Celestia Core will power the Celestia main chain by leveraging Tendermint. 
-| Branch | Tests | Coverage | Linting | -|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------| -| master | ![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) | +Celestia itself is a scale-out data availability-focused minimal blockchain. +It allows users to post arbitrary data on the chain, as well as define their own execution layers. +This data is ordered on-chain but not executed. This allows for the first scalable data layer for +decentralised applications, including optimistic rollup sidechains. Additionally, this design allows developers to +define their own execution environments. -Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. +Read this [blog post](https://blog.celestia.org/celestia-a-scalable-general-purpose-data-availability-layer-for-decentralized-apps-and-trust-minimized-sidechains/) +to learn more about what we are building. -For protocol details, see [the specification](https://github.com/tendermint/spec). - -For detailed analysis of the consensus protocol, including safety and liveness proofs, -see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)". - -## Releases - -Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead. 
- -Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs. -See below for more details about [versioning](#versioning). - -In any case, if you intend to run Tendermint in production, we're happy to help. You can -contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork). - -## Security - -To report a security vulnerability, see our [bug bounty program](https://hackerone.com/cosmos). -For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md). +## Documentation -We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list -to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD). +The original [whitepaper](https://arxiv.org/abs/1905.09274) and the +[specification](https://github.com/celestiaorg/celestia-specs) which we are currently wrapping up can give you +a more detailed overview what to expect from this repository. -## Minimum requirements +### Minimum requirements | Requirement | Notes | |-------------|------------------| -| Go version | Go1.16 or higher | - -## Documentation - -Complete documentation can be found on the [website](https://docs.tendermint.com/master/). - -### Install - -See the [install instructions](/docs/introduction/install.md). - -### Quick Start - -- [Single node](/docs/introduction/quick-start.md) -- [Local cluster using docker-compose](/docs/tools/docker-compose.md) -- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md) -- [Join the Cosmos testnet](https://cosmos.network/testnet) +| Go version | Go1.17 or higher | ## Contributing -Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions. 
- Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) -and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the -[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md), -and familiarize yourself with our -[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). +and the [style guide](STYLE_GUIDE.md). + +Join the community at [Telegram](https://t.me/CelestiaCommunity) or jump onto the [Forum](https://forum.celestia.org/) +to get more involved into discussions. + +Learn more by reading the code and the +[specifications](https://github.com/celestiaorg/celestia-specs). ## Versioning ### Semantic Versioning -Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes. +Celestia Core uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes. According to SemVer, anything in the public API can change at any time before version 1.0.0 -To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used -to signal breaking changes across Tendermint's API. This API includes all -publicly exposed types, functions, and methods in non-internal Go packages as well as -the types and methods accessible via the Tendermint RPC interface. - -Breaking changes to these public APIs will be documented in the CHANGELOG. - -### Upgrades - -In an effort to avoid accumulating technical debt prior to 1.0.0, -we do not guarantee that breaking changes (ie. bumps in the MINOR version) -will work with existing Tendermint blockchains. In these cases you will -have to start a new blockchain, or write something custom to get the old -data into the new chain. However, any bump in the PATCH version should be -compatible with existing blockchain histories. 
- - -For more information on upgrading, see [UPGRADING.md](./UPGRADING.md). - -### Supported Versions - -Because we are a small core team, we only ship patch updates, including security updates, -to the most recent minor release and the second-most recent minor release. Consequently, -we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found -in [UPGRADING.md](./UPGRADING.md). - ## Resources -### Tendermint Core - -For details about the blockchain data structures and the p2p protocols, see the -[Tendermint specification](https://docs.tendermint.com/master/spec/). - -For details on using the software, see the [documentation](/docs/) which is also -hosted at: - -### Tools +### Celestia (formerly LazyLedger) -Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test). -Additional tooling can be found in [/docs/tools](/docs/tools). +- [Ethereum research post](https://ethresear.ch/t/a-data-availability-blockchain-with-sub-linear-full-block-validation/5503) +- [Academic paper](https://arxiv.org/abs/1905.09274) +- [Blog](https://blog.celestia.org) +- [Project web site](https://celestia.org) +- [Academic prototype](https://github.com/celestiaorg/lazyledger-prototype) +- [Follow Celestia on Twitter](https://twitter.com/CelestiaOrg) -### Applications - -- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework -- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint -- [Many more](https://tendermint.com/ecosystem) - -### Research - -- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938) -- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) -- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf) -- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core) -- [Cosmos Blog](https://blog.cosmos.network/tendermint/home) +### 
Tendermint Core -## Join us! +For more information on Tendermint Core and pointers to documentation for Tendermint visit +this [repository](https://github.com/tendermint/tendermint). -Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin). -If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)! +## Careers -Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io), -a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity - that also maintains [tendermint.com](https://tendermint.com). +We are hiring Go engineers! Join us in building the future of blockchain scaling and interoperability. [Apply here](https://angel.co/company/celestialabs/jobs). diff --git a/abci/client/client.go b/abci/client/client.go index 9725f88388..5f35cb9630 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -46,6 +46,8 @@ type Client interface { OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error) LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error) ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error) + PrepareProposalAsync(context.Context, types.RequestPrepareProposal) (*ReqRes, error) + ProcessProposalAsync(context.Context, types.RequestProcessProposal) (*ReqRes, error) // Synchronous requests FlushSync(context.Context) error @@ -62,6 +64,8 @@ type Client interface { OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + PrepareProposalSync(context.Context, 
types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) + ProcessProposalSync(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error) } //---------------------------------------- diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 049910beaf..e1e79ebec1 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -315,6 +315,49 @@ func (cli *grpcClient) ApplySnapshotChunkAsync( ) } +func (cli *grpcClient) PrepareProposalAsync( + ctx context.Context, + params types.RequestPrepareProposal, +) (*ReqRes, error) { + + req := types.ToRequestPrepareProposal(params) + res, err := cli.client.PrepareProposal(ctx, req.GetPrepareProposal(), grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + return cli.finishAsyncCall( + ctx, + req, + &types.Response{ + Value: &types.Response_PrepareProposal{ + PrepareProposal: res, + }, + }, + ) +} + +func (cli *grpcClient) ProcessProposalAsync( + ctx context.Context, + params types.RequestProcessProposal, +) (*ReqRes, error) { + + req := types.ToRequestProcessProposal(params) + res, err := cli.client.ProcessProposal(ctx, req.GetProcessProposal(), grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + + return cli.finishAsyncCall( + ctx, + req, + &types.Response{ + Value: &types.Response_ProcessProposal{ + ProcessProposal: res, + }, + }, + ) +} + // finishAsyncCall creates a ReqRes for an async call, and immediately populates it // with the response. We don't complete it until it's been ordered via the channel. 
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) { @@ -505,3 +548,27 @@ func (cli *grpcClient) ApplySnapshotChunkSync( } return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error() } + +func (cli *grpcClient) PrepareProposalSync( + ctx context.Context, + params types.RequestPrepareProposal, +) (*types.ResponsePrepareProposal, error) { + + reqres, err := cli.PrepareProposalAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error() +} + +func (cli *grpcClient) ProcessProposalSync( + ctx context.Context, + params types.RequestProcessProposal, +) (*types.ResponseProcessProposal, error) { + + reqres, err := cli.ProcessProposalAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetProcessProposal(), cli.Error() +} diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 33773e9363..776fc3dd80 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -202,6 +202,34 @@ func (app *localClient) ApplySnapshotChunkAsync( ), nil } +func (app *localClient) PrepareProposalAsync( + ctx context.Context, + req types.RequestPrepareProposal, +) (*ReqRes, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.PrepareProposal(req) + return app.callback( + types.ToRequestPrepareProposal(req), + types.ToResponsePrepareProposal(res), + ), nil +} + +func (app *localClient) ProcessProposalAsync( + ctx context.Context, + req types.RequestProcessProposal, +) (*ReqRes, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ProcessProposal(req) + return app.callback( + types.ToRequestProcessProposal(req), + types.ToResponseProcessProposal(res), + ), nil +} + //------------------------------------------------------- func (app *localClient) FlushSync(ctx context.Context) error { @@ -344,6 +372,29 @@ func (app *localClient) 
ApplySnapshotChunkSync( return &res, nil } +func (app *localClient) PrepareProposalSync( + ctx context.Context, + req types.RequestPrepareProposal, +) (*types.ResponsePrepareProposal, error) { + + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.PrepareProposal(req) + return &res, nil +} + +func (app *localClient) ProcessProposalSync( + ctx context.Context, + req types.RequestProcessProposal, +) (*types.ResponseProcessProposal, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ProcessProposal(req) + return &res, nil +} + //------------------------------------------------------- func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes { diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 664646e61c..a8cba43b54 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -11,6 +11,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -669,6 +671,98 @@ func (_m *Client) OnStop() { _m.Called() } +// PrepareProposalAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) PrepareProposalAsync(_a0 context.Context, _a1 types.RequestPrepareProposal) (*abciclient.ReqRes, error) { + ret := _m.Called(_a0, _a1) + + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *abciclient.ReqRes); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abciclient.ReqRes) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PrepareProposalSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) PrepareProposalSync(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + ret := _m.Called(_a0, _a1) + + 
var r0 *types.ResponsePrepareProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponsePrepareProposal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessProposalAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) ProcessProposalAsync(_a0 context.Context, _a1 types.RequestProcessProposal) (*abciclient.ReqRes, error) { + ret := _m.Called(_a0, _a1) + + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestProcessProposal) *abciclient.ReqRes); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abciclient.ReqRes) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestProcessProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessProposalSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) ProcessProposalSync(_a0 context.Context, _a1 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseProcessProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestProcessProposal) *types.ResponseProcessProposal); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseProcessProposal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestProcessProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // QueryAsync provides a mock function with given fields: _a0, _a1 func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) { ret := 
_m.Called(_a0, _a1) @@ -801,3 +895,13 @@ func (_m *Client) String() string { func (_m *Client) Wait() { _m.Called() } + +// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewClient(t testing.TB) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 8218458018..b19e150f3d 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -276,6 +276,20 @@ func (cli *socketClient) ApplySnapshotChunkAsync( return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req)) } +func (cli *socketClient) PrepareProposalAsync( + ctx context.Context, + req types.RequestPrepareProposal, +) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestPrepareProposal(req)) +} + +func (cli *socketClient) ProcessProposalAsync( + ctx context.Context, + req types.RequestProcessProposal, +) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestProcessProposal(req)) +} + //---------------------------------------- func (cli *socketClient) FlushSync(ctx context.Context) error { @@ -446,6 +460,30 @@ func (cli *socketClient) ApplySnapshotChunkSync( return reqres.Response.GetApplySnapshotChunk(), nil } +func (cli *socketClient) PrepareProposalSync( + ctx context.Context, + req types.RequestPrepareProposal, +) (*types.ResponsePrepareProposal, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestPrepareProposal(req)) + if err != nil { + return nil, err + } + return reqres.Response.GetPrepareProposal(), nil +} + +func (cli *socketClient) ProcessProposalSync( + ctx context.Context, + req types.RequestProcessProposal, +) (*types.ResponseProcessProposal, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestProcessProposal(req)) + if err != nil { 
+ return nil, err + } + return reqres.Response.GetProcessProposal(), nil +} + //---------------------------------------- // queueRequest enqueues req onto the queue. The request can break early if the @@ -562,6 +600,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_ListSnapshots) case *types.Request_OfferSnapshot: _, ok = res.Value.(*types.Response_OfferSnapshot) + case *types.Request_PrepareProposal: + _, ok = res.Value.(*types.Response_PrepareProposal) + case *types.Request_ProcessProposal: + _, ok = res.Value.(*types.Response_ProcessProposal) } return ok } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 97256c8ac4..b6cbce1d92 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -171,3 +171,9 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo return resQuery } + +func (app *Application) PrepareProposal( + req types.RequestPrepareProposal) types.ResponsePrepareProposal { + return types.ResponsePrepareProposal{ + BlockData: req.BlockData} +} diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 40451baa93..14f446693e 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -170,6 +170,21 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk( return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT} } +func (app *PersistentKVStoreApplication) PrepareProposal( + req types.RequestPrepareProposal) types.ResponsePrepareProposal { + return types.ResponsePrepareProposal{BlockData: req.BlockData} +} + +func (app *PersistentKVStoreApplication) ProcessProposal( + req types.RequestProcessProposal) types.ResponseProcessProposal { + for _, tx := range req.Txs { + if len(tx) == 0 { + return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_REJECT} + } + } + 
return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_ACCEPT} +} + //--------------------------------------------- // update validators diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 85539645bf..116d840218 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -227,6 +227,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_OfferSnapshot: res := s.app.OfferSnapshot(*r.OfferSnapshot) responses <- types.ToResponseOfferSnapshot(res) + case *types.Request_PrepareProposal: + res := s.app.PrepareProposal(*r.PrepareProposal) + responses <- types.ToResponsePrepareProposal(res) + case *types.Request_ProcessProposal: + res := s.app.ProcessProposal(*r.ProcessProposal) + responses <- types.ToResponseProcessProposal(res) case *types.Request_LoadSnapshotChunk: res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk) responses <- types.ToResponseLoadSnapshotChunk(res) diff --git a/abci/types/application.go b/abci/types/application.go index 2a3cabd8bb..b74b1cbfef 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -17,11 +17,13 @@ type Application interface { CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool // Consensus Connection - InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore - BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block - DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing - EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set - Commit() ResponseCommit // Commit the state and return the application Merkle root hash + InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore + PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal // Passes the 
block data to the application for modification before proposing + BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block + DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing + EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set + Commit() ResponseCommit // Commit the state and return the application Merkle root hash + ProcessProposal(RequestProcessProposal) ResponseProcessProposal // Inspects the proposed block before voting on it during consensus // State Sync Connection ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots @@ -90,6 +92,14 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons return ResponseApplySnapshotChunk{} } +func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal { + return ResponsePrepareProposal{BlockData: req.BlockData} +} + +func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal { + return ResponseProcessProposal{Result: ResponseProcessProposal_ACCEPT} +} + //------------------------------------------------------- // GRPCApplication is a GRPC wrapper for Application @@ -172,3 +182,15 @@ func (app *GRPCApplication) ApplySnapshotChunk( res := app.app.ApplySnapshotChunk(*req) return &res, nil } + +func (app *GRPCApplication) PrepareProposal( + ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + res := app.app.PrepareProposal(*req) + return &res, nil +} + +func (app *GRPCApplication) ProcessProposal( + ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + res := app.app.ProcessProposal(*req) + return &res, nil +} diff --git a/abci/types/messages.go b/abci/types/messages.go index 74f3cc75c8..9098437bd3 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -110,6 +110,18 @@ func ToRequestApplySnapshotChunk(req 
RequestApplySnapshotChunk) *Request { } } +func ToRequestPrepareProposal(req RequestPrepareProposal) *Request { + return &Request{ + Value: &Request_PrepareProposal{&req}, + } +} + +func ToRequestProcessProposal(req RequestProcessProposal) *Request { + return &Request{ + Value: &Request_ProcessProposal{&req}, + } +} + //---------------------------------------- func ToResponseException(errStr string) *Response { @@ -200,3 +212,15 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response { Value: &Response_ApplySnapshotChunk{&res}, } } + +func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response { + return &Response{ + Value: &Response_PrepareProposal{&res}, + } +} + +func ToResponseProcessProposal(res ResponseProcessProposal) *Response { + return &Response{ + Value: &Response_ProcessProposal{&res}, + } +} diff --git a/abci/types/result.go b/abci/types/result.go index dba6bfd159..a071d303a4 100644 --- a/abci/types/result.go +++ b/abci/types/result.go @@ -41,6 +41,23 @@ func (r ResponseQuery) IsErr() bool { return r.Code != CodeTypeOK } +// IsOK returns true if Code is OK + +// IsUnknown returns true if Code is Unknown +func (r ResponseProcessProposal) IsUnknown() bool { + return r.Result == ResponseProcessProposal_UNKNOWN +} + +// IsOK returns true if Code is OK +func (r ResponseProcessProposal) IsOK() bool { + return r.Result == ResponseProcessProposal_ACCEPT +} + +// IsErr returns true if Code is something other than OK. +func (r ResponseProcessProposal) IsErr() bool { + return r.Result != ResponseProcessProposal_ACCEPT +} + //--------------------------------------------------------------------------- // override JSON marshaling so we emit defaults (ie. 
disable omitempty) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6b00c587a1..e6fbde8566 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -120,7 +120,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28, 0} + return fileDescriptor_252557cfdd89a31a, []int{30, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -157,7 +157,35 @@ func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30, 0} + return fileDescriptor_252557cfdd89a31a, []int{32, 0} +} + +type ResponseProcessProposal_Result int32 + +const ( + ResponseProcessProposal_UNKNOWN ResponseProcessProposal_Result = 0 + ResponseProcessProposal_ACCEPT ResponseProcessProposal_Result = 1 + ResponseProcessProposal_REJECT ResponseProcessProposal_Result = 2 +) + +var ResponseProcessProposal_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "REJECT", +} + +var ResponseProcessProposal_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "REJECT": 2, +} + +func (x ResponseProcessProposal_Result) String() string { + return proto.EnumName(ResponseProcessProposal_Result_name, int32(x)) +} + +func (ResponseProcessProposal_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{34, 0} } type Request struct { @@ -176,6 +204,8 @@ type Request struct { // *Request_OfferSnapshot // *Request_LoadSnapshotChunk // *Request_ApplySnapshotChunk + // *Request_PrepareProposal + // *Request_ProcessProposal Value isRequest_Value `protobuf_oneof:"value"` } @@ -260,6 +290,12 @@ type Request_LoadSnapshotChunk struct { type Request_ApplySnapshotChunk struct { ApplySnapshotChunk *RequestApplySnapshotChunk 
`protobuf:"bytes,14,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } +type Request_PrepareProposal struct { + PrepareProposal *RequestPrepareProposal `protobuf:"bytes,15,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Request_ProcessProposal struct { + ProcessProposal *RequestProcessProposal `protobuf:"bytes,16,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} func (*Request_Echo) isRequest_Value() {} func (*Request_Flush) isRequest_Value() {} @@ -275,6 +311,8 @@ func (*Request_ListSnapshots) isRequest_Value() {} func (*Request_OfferSnapshot) isRequest_Value() {} func (*Request_LoadSnapshotChunk) isRequest_Value() {} func (*Request_ApplySnapshotChunk) isRequest_Value() {} +func (*Request_PrepareProposal) isRequest_Value() {} +func (*Request_ProcessProposal) isRequest_Value() {} func (m *Request) GetValue() isRequest_Value { if m != nil { @@ -381,6 +419,20 @@ func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { return nil } +func (m *Request) GetPrepareProposal() *RequestPrepareProposal { + if x, ok := m.GetValue().(*Request_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Request) GetProcessProposal() *RequestProcessProposal { + if x, ok := m.GetValue().(*Request_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Request) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -398,6 +450,8 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_OfferSnapshot)(nil), (*Request_LoadSnapshotChunk)(nil), (*Request_ApplySnapshotChunk)(nil), + (*Request_PrepareProposal)(nil), + (*Request_ProcessProposal)(nil), } } @@ -1157,6 +1211,114 @@ func (m *RequestApplySnapshotChunk) GetSender() string { return "" } +type RequestPrepareProposal struct { + // block_data is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + // applications can not exceed the size of the data passed to it. + BlockData *types1.Data `protobuf:"bytes,1,opt,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` + // If an application decides to populate block_data with extra information, they can not exceed this value. + BlockDataSize int64 `protobuf:"varint,2,opt,name=block_data_size,json=blockDataSize,proto3" json:"block_data_size,omitempty"` +} + +func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } +func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } +func (*RequestPrepareProposal) ProtoMessage() {} +func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{15} +} +func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPrepareProposal.Merge(m, src) +} +func (m *RequestPrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestPrepareProposal) 
XXX_DiscardUnknown() { + xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo + +func (m *RequestPrepareProposal) GetBlockData() *types1.Data { + if m != nil { + return m.BlockData + } + return nil +} + +func (m *RequestPrepareProposal) GetBlockDataSize() int64 { + if m != nil { + return m.BlockDataSize + } + return 0 +} + +type RequestProcessProposal struct { + Header types1.Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } +func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } +func (*RequestProcessProposal) ProtoMessage() {} +func (*RequestProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{16} +} +func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestProcessProposal.Merge(m, src) +} +func (m *RequestProcessProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestProcessProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestProcessProposal proto.InternalMessageInfo + +func (m *RequestProcessProposal) GetHeader() types1.Header { + if m != nil { + return m.Header + } + return types1.Header{} +} + +func (m *RequestProcessProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + type Response 
struct { // Types that are valid to be assigned to Value: // *Response_Exception @@ -1174,6 +1336,8 @@ type Response struct { // *Response_OfferSnapshot // *Response_LoadSnapshotChunk // *Response_ApplySnapshotChunk + // *Response_PrepareProposal + // *Response_ProcessProposal Value isResponse_Value `protobuf_oneof:"value"` } @@ -1181,7 +1345,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{15} + return fileDescriptor_252557cfdd89a31a, []int{17} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1261,6 +1425,12 @@ type Response_LoadSnapshotChunk struct { type Response_ApplySnapshotChunk struct { ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } +type Response_PrepareProposal struct { + PrepareProposal *ResponsePrepareProposal `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Response_ProcessProposal struct { + ProcessProposal *ResponseProcessProposal `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} func (*Response_Exception) isResponse_Value() {} func (*Response_Echo) isResponse_Value() {} @@ -1277,6 +1447,8 @@ func (*Response_ListSnapshots) isResponse_Value() {} func (*Response_OfferSnapshot) isResponse_Value() {} func (*Response_LoadSnapshotChunk) isResponse_Value() {} func (*Response_ApplySnapshotChunk) isResponse_Value() {} +func (*Response_PrepareProposal) isResponse_Value() {} +func (*Response_ProcessProposal) isResponse_Value() {} func (m *Response) GetValue() isResponse_Value { if m != nil { @@ -1390,6 +1562,20 @@ func (m *Response) GetApplySnapshotChunk() 
*ResponseApplySnapshotChunk { return nil } +func (m *Response) GetPrepareProposal() *ResponsePrepareProposal { + if x, ok := m.GetValue().(*Response_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Response) GetProcessProposal() *ResponseProcessProposal { + if x, ok := m.GetValue().(*Response_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Response) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1408,6 +1594,8 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_OfferSnapshot)(nil), (*Response_LoadSnapshotChunk)(nil), (*Response_ApplySnapshotChunk)(nil), + (*Response_PrepareProposal)(nil), + (*Response_ProcessProposal)(nil), } } @@ -1420,7 +1608,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{18} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1464,7 +1652,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{19} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1507,7 +1695,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} + return 
fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1549,7 +1737,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1623,7 +1811,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1690,7 +1878,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1790,7 +1978,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1846,7 +2034,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m 
*ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1967,7 +2155,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{26} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2062,7 +2250,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} + return fileDescriptor_252557cfdd89a31a, []int{27} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2122,7 +2310,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} + return fileDescriptor_252557cfdd89a31a, []int{28} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2173,7 +2361,7 @@ func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } func (*ResponseListSnapshots) ProtoMessage() {} func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - 
return fileDescriptor_252557cfdd89a31a, []int{27} + return fileDescriptor_252557cfdd89a31a, []int{29} } func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2217,7 +2405,7 @@ func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*ResponseOfferSnapshot) ProtoMessage() {} func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} + return fileDescriptor_252557cfdd89a31a, []int{30} } func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2261,7 +2449,7 @@ func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotC func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseLoadSnapshotChunk) ProtoMessage() {} func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} + return fileDescriptor_252557cfdd89a31a, []int{31} } func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2307,7 +2495,7 @@ func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapsho func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseApplySnapshotChunk) ProtoMessage() {} func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} + return fileDescriptor_252557cfdd89a31a, []int{32} } func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2357,6 +2545,102 @@ func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { return nil } +type ResponsePrepareProposal struct { + BlockData *types1.Data `protobuf:"bytes,1,opt,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` +} + +func (m *ResponsePrepareProposal) Reset() { *m = 
ResponsePrepareProposal{} } +func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } +func (*ResponsePrepareProposal) ProtoMessage() {} +func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{33} +} +func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) +} +func (m *ResponsePrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo + +func (m *ResponsePrepareProposal) GetBlockData() *types1.Data { + if m != nil { + return m.BlockData + } + return nil +} + +type ResponseProcessProposal struct { + Result ResponseProcessProposal_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseProcessProposal_Result" json:"result,omitempty"` + Evidence [][]byte `protobuf:"bytes,2,rep,name=evidence,proto3" json:"evidence,omitempty"` +} + +func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } +func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } +func (*ResponseProcessProposal) ProtoMessage() {} +func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{34} +} +func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseProcessProposal) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseProcessProposal.Merge(m, src) +} +func (m *ResponseProcessProposal) XXX_Size() int { + return m.Size() +} +func (m *ResponseProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo + +func (m *ResponseProcessProposal) GetResult() ResponseProcessProposal_Result { + if m != nil { + return m.Result + } + return ResponseProcessProposal_UNKNOWN +} + +func (m *ResponseProcessProposal) GetEvidence() [][]byte { + if m != nil { + return m.Evidence + } + return nil +} + type LastCommitInfo struct { Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` @@ -2366,7 +2650,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} + return fileDescriptor_252557cfdd89a31a, []int{35} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2421,7 +2705,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} + return fileDescriptor_252557cfdd89a31a, []int{36} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2475,7 +2759,7 @@ 
func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} + return fileDescriptor_252557cfdd89a31a, []int{37} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2529,17 +2813,18 @@ func (m *EventAttribute) GetIndex() bool { // // One usage is indexing transaction results. type TxResult struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` - Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` - Result ResponseDeliverTx `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` + Result ResponseDeliverTx `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` + OriginalHash []byte `protobuf:"bytes,5,opt,name=original_hash,json=originalHash,proto3" json:"original_hash,omitempty"` } func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} + return fileDescriptor_252557cfdd89a31a, []int{38} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2596,6 +2881,13 @@ func (m *TxResult) GetResult() ResponseDeliverTx { return ResponseDeliverTx{} } +func (m *TxResult) GetOriginalHash() []byte { + if m != nil { + return m.OriginalHash + } + return nil +} + // Validator type Validator struct { Address []byte 
`protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` @@ -2607,7 +2899,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} + return fileDescriptor_252557cfdd89a31a, []int{39} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2660,7 +2952,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{40} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2713,7 +3005,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{41} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2774,7 +3066,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{42} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2850,7 +3142,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return 
fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2919,6 +3211,7 @@ func init() { proto.RegisterEnum("tendermint.abci.EvidenceType", EvidenceType_name, EvidenceType_value) proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) + proto.RegisterEnum("tendermint.abci.ResponseProcessProposal_Result", ResponseProcessProposal_Result_name, ResponseProcessProposal_Result_value) proto.RegisterType((*Request)(nil), "tendermint.abci.Request") proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.RequestFlush") @@ -2934,6 +3227,8 @@ func init() { proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") + proto.RegisterType((*RequestPrepareProposal)(nil), "tendermint.abci.RequestPrepareProposal") + proto.RegisterType((*RequestProcessProposal)(nil), "tendermint.abci.RequestProcessProposal") proto.RegisterType((*Response)(nil), "tendermint.abci.Response") proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") @@ -2950,6 +3245,8 @@ func init() { proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") 
+ proto.RegisterType((*ResponsePrepareProposal)(nil), "tendermint.abci.ResponsePrepareProposal") + proto.RegisterType((*ResponseProcessProposal)(nil), "tendermint.abci.ResponseProcessProposal") proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.LastCommitInfo") proto.RegisterType((*Event)(nil), "tendermint.abci.Event") proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") @@ -2964,172 +3261,186 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 2627 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0xdb, 0xc6, - 0x15, 0xe7, 0x37, 0x89, 0x47, 0x91, 0xa2, 0xd6, 0x8a, 0x43, 0x33, 0xb6, 0xe4, 0xc0, 0xe3, 0x34, - 0x76, 0x12, 0xa9, 0x91, 0xc7, 0xae, 0x33, 0xe9, 0x47, 0x44, 0x9a, 0x2e, 0x15, 0xab, 0x92, 0xba, - 0xa2, 0x9d, 0x49, 0xdb, 0x18, 0x01, 0x89, 0x15, 0x89, 0x98, 0x04, 0x10, 0x60, 0x29, 0x4b, 0x39, - 0x76, 0xda, 0x8b, 0xa7, 0x07, 0x1f, 0x7b, 0xc9, 0x4c, 0xff, 0x83, 0x5e, 0x7b, 0xea, 0xa9, 0x87, - 0x1c, 0xda, 0x99, 0x1c, 0x7b, 0xe8, 0xa4, 0x1d, 0xfb, 0xd6, 0x7f, 0xa0, 0xa7, 0xce, 0x74, 0xf6, - 0x03, 0x20, 0x40, 0x12, 0x22, 0xd5, 0xf4, 0xd6, 0xdb, 0xee, 0xc3, 0x7b, 0x8f, 0xbb, 0x6f, 0xf7, - 0xfd, 0xf6, 0xb7, 0x6f, 0x09, 0xaf, 0x51, 0x62, 0x19, 0xc4, 0x1d, 0x9a, 0x16, 0xdd, 0xd4, 0x3b, - 0x5d, 0x73, 0x93, 0x9e, 0x3a, 0xc4, 0xdb, 0x70, 0x5c, 0x9b, 0xda, 0x68, 0x79, 0xfc, 0x71, 0x83, - 0x7d, 0xac, 0x5d, 0x09, 0x69, 0x77, 0xdd, 0x53, 0x87, 0xda, 0x9b, 0x8e, 0x6b, 0xdb, 0x47, 0x42, - 0xbf, 0x76, 0x39, 0xf4, 0x99, 0xfb, 0x09, 0x7b, 0x8b, 0x7c, 0x95, 0xc6, 0x4f, 0xc8, 0xa9, 0xff, - 0xf5, 0xca, 0x94, 0xad, 0xa3, 0xbb, 0xfa, 0xd0, 0xff, 0xbc, 0xde, 0xb3, 0xed, 0xde, 0x80, 0x6c, - 0xf2, 0x5e, 0x67, 0x74, 0xb4, 0x49, 0xcd, 0x21, 0xf1, 0xa8, 0x3e, 0x74, 0xa4, 0xc2, 0x6a, 0xcf, - 0xee, 0xd9, 0xbc, 0xb9, 0xc9, 0x5a, 0x42, 0xaa, 0xfe, 0x25, 0x0f, 0x79, 0x4c, 
0x3e, 0x1f, 0x11, - 0x8f, 0xa2, 0x2d, 0xc8, 0x90, 0x6e, 0xdf, 0xae, 0x26, 0xaf, 0x26, 0xdf, 0x2c, 0x6e, 0x5d, 0xde, - 0x98, 0x98, 0xdc, 0x86, 0xd4, 0x6b, 0x76, 0xfb, 0x76, 0x2b, 0x81, 0xb9, 0x2e, 0xba, 0x0d, 0xd9, - 0xa3, 0xc1, 0xc8, 0xeb, 0x57, 0x53, 0xdc, 0xe8, 0x4a, 0x9c, 0xd1, 0x7d, 0xa6, 0xd4, 0x4a, 0x60, - 0xa1, 0xcd, 0x7e, 0xca, 0xb4, 0x8e, 0xec, 0x6a, 0xfa, 0xec, 0x9f, 0xda, 0xb1, 0x8e, 0xf8, 0x4f, - 0x31, 0x5d, 0x54, 0x07, 0x30, 0x2d, 0x93, 0x6a, 0xdd, 0xbe, 0x6e, 0x5a, 0xd5, 0x0c, 0xb7, 0x7c, - 0x3d, 0xde, 0xd2, 0xa4, 0x0d, 0xa6, 0xd8, 0x4a, 0x60, 0xc5, 0xf4, 0x3b, 0x6c, 0xb8, 0x9f, 0x8f, - 0x88, 0x7b, 0x5a, 0xcd, 0x9e, 0x3d, 0xdc, 0x9f, 0x32, 0x25, 0x36, 0x5c, 0xae, 0x8d, 0x9a, 0x50, - 0xec, 0x90, 0x9e, 0x69, 0x69, 0x9d, 0x81, 0xdd, 0x7d, 0x52, 0xcd, 0x71, 0x63, 0x35, 0xce, 0xb8, - 0xce, 0x54, 0xeb, 0x4c, 0xb3, 0x95, 0xc0, 0xd0, 0x09, 0x7a, 0xe8, 0xfb, 0x50, 0xe8, 0xf6, 0x49, - 0xf7, 0x89, 0x46, 0x4f, 0xaa, 0x79, 0xee, 0x63, 0x3d, 0xce, 0x47, 0x83, 0xe9, 0xb5, 0x4f, 0x5a, - 0x09, 0x9c, 0xef, 0x8a, 0x26, 0x9b, 0xbf, 0x41, 0x06, 0xe6, 0x31, 0x71, 0x99, 0x7d, 0xe1, 0xec, - 0xf9, 0xdf, 0x13, 0x9a, 0xdc, 0x83, 0x62, 0xf8, 0x1d, 0xf4, 0x23, 0x50, 0x88, 0x65, 0xc8, 0x69, - 0x28, 0xdc, 0xc5, 0xd5, 0xd8, 0x75, 0xb6, 0x0c, 0x7f, 0x12, 0x05, 0x22, 0xdb, 0xe8, 0x2e, 0xe4, - 0xba, 0xf6, 0x70, 0x68, 0xd2, 0x2a, 0x70, 0xeb, 0xb5, 0xd8, 0x09, 0x70, 0xad, 0x56, 0x02, 0x4b, - 0x7d, 0xb4, 0x07, 0xe5, 0x81, 0xe9, 0x51, 0xcd, 0xb3, 0x74, 0xc7, 0xeb, 0xdb, 0xd4, 0xab, 0x16, - 0xb9, 0x87, 0xeb, 0x71, 0x1e, 0x76, 0x4d, 0x8f, 0x1e, 0xfa, 0xca, 0xad, 0x04, 0x2e, 0x0d, 0xc2, - 0x02, 0xe6, 0xcf, 0x3e, 0x3a, 0x22, 0x6e, 0xe0, 0xb0, 0xba, 0x74, 0xb6, 0xbf, 0x7d, 0xa6, 0xed, - 0xdb, 0x33, 0x7f, 0x76, 0x58, 0x80, 0x7e, 0x0e, 0x17, 0x06, 0xb6, 0x6e, 0x04, 0xee, 0xb4, 0x6e, - 0x7f, 0x64, 0x3d, 0xa9, 0x96, 0xb8, 0xd3, 0x1b, 0xb1, 0x83, 0xb4, 0x75, 0xc3, 0x77, 0xd1, 0x60, - 0x06, 0xad, 0x04, 0x5e, 0x19, 0x4c, 0x0a, 0xd1, 0x63, 0x58, 0xd5, 0x1d, 0x67, 0x70, 0x3a, 0xe9, - 0xbd, 0xcc, 0xbd, 
0xdf, 0x8c, 0xf3, 0xbe, 0xcd, 0x6c, 0x26, 0xdd, 0x23, 0x7d, 0x4a, 0x5a, 0xcf, - 0x43, 0xf6, 0x58, 0x1f, 0x8c, 0x88, 0xfa, 0x1d, 0x28, 0x86, 0xd2, 0x14, 0x55, 0x21, 0x3f, 0x24, - 0x9e, 0xa7, 0xf7, 0x08, 0xcf, 0x6a, 0x05, 0xfb, 0x5d, 0xb5, 0x0c, 0x4b, 0xe1, 0xd4, 0x54, 0x9f, - 0x27, 0x03, 0x4b, 0x96, 0x75, 0xcc, 0xf2, 0x98, 0xb8, 0x9e, 0x69, 0x5b, 0xbe, 0xa5, 0xec, 0xa2, - 0x6b, 0x50, 0xe2, 0xfb, 0x47, 0xf3, 0xbf, 0xb3, 0xd4, 0xcf, 0xe0, 0x25, 0x2e, 0x7c, 0x24, 0x95, - 0xd6, 0xa1, 0xe8, 0x6c, 0x39, 0x81, 0x4a, 0x9a, 0xab, 0x80, 0xb3, 0xe5, 0xf8, 0x0a, 0xaf, 0xc3, - 0x12, 0x9b, 0x69, 0xa0, 0x91, 0xe1, 0x3f, 0x52, 0x64, 0x32, 0xa9, 0xa2, 0xfe, 0x39, 0x05, 0x95, - 0xc9, 0x74, 0x46, 0x77, 0x21, 0xc3, 0x90, 0x4d, 0x82, 0x54, 0x6d, 0x43, 0xc0, 0xde, 0x86, 0x0f, - 0x7b, 0x1b, 0x6d, 0x1f, 0xf6, 0xea, 0x85, 0xaf, 0xbe, 0x59, 0x4f, 0x3c, 0xff, 0xfb, 0x7a, 0x12, - 0x73, 0x0b, 0x74, 0x89, 0x65, 0x9f, 0x6e, 0x5a, 0x9a, 0x69, 0xf0, 0x21, 0x2b, 0x2c, 0xb5, 0x74, - 0xd3, 0xda, 0x31, 0xd0, 0x2e, 0x54, 0xba, 0xb6, 0xe5, 0x11, 0xcb, 0x1b, 0x79, 0x9a, 0x80, 0x55, - 0x09, 0x4d, 0x91, 0x04, 0x13, 0x60, 0xdd, 0xf0, 0x35, 0x0f, 0xb8, 0x22, 0x5e, 0xee, 0x46, 0x05, - 0xe8, 0x3e, 0xc0, 0xb1, 0x3e, 0x30, 0x0d, 0x9d, 0xda, 0xae, 0x57, 0xcd, 0x5c, 0x4d, 0xcf, 0xcc, - 0xb2, 0x47, 0xbe, 0xca, 0x43, 0xc7, 0xd0, 0x29, 0xa9, 0x67, 0xd8, 0x70, 0x71, 0xc8, 0x12, 0xbd, - 0x01, 0xcb, 0xba, 0xe3, 0x68, 0x1e, 0xd5, 0x29, 0xd1, 0x3a, 0xa7, 0x94, 0x78, 0x1c, 0xb6, 0x96, - 0x70, 0x49, 0x77, 0x9c, 0x43, 0x26, 0xad, 0x33, 0x21, 0xba, 0x0e, 0x65, 0x86, 0x70, 0xa6, 0x3e, - 0xd0, 0xfa, 0xc4, 0xec, 0xf5, 0x29, 0x07, 0xa8, 0x34, 0x2e, 0x49, 0x69, 0x8b, 0x0b, 0x55, 0x23, - 0x58, 0x71, 0x8e, 0x6e, 0x08, 0x41, 0xc6, 0xd0, 0xa9, 0xce, 0x23, 0xb9, 0x84, 0x79, 0x9b, 0xc9, - 0x1c, 0x9d, 0xf6, 0x65, 0x7c, 0x78, 0x1b, 0x5d, 0x84, 0x9c, 0x74, 0x9b, 0xe6, 0x6e, 0x65, 0x0f, - 0xad, 0x42, 0xd6, 0x71, 0xed, 0x63, 0xc2, 0x97, 0xae, 0x80, 0x45, 0x47, 0xfd, 0x55, 0x0a, 0x56, - 0xa6, 0x70, 0x90, 0xf9, 0xed, 0xeb, 0x5e, 0xdf, 0xff, 
0x2d, 0xd6, 0x46, 0x77, 0x98, 0x5f, 0xdd, - 0x20, 0xae, 0x3c, 0x3b, 0xaa, 0xd3, 0xa1, 0x6e, 0xf1, 0xef, 0x32, 0x34, 0x52, 0x1b, 0xed, 0x43, - 0x65, 0xa0, 0x7b, 0x54, 0x13, 0xb8, 0xa2, 0x85, 0xce, 0x91, 0x69, 0x34, 0xdd, 0xd5, 0x7d, 0x24, - 0x62, 0x9b, 0x5a, 0x3a, 0x2a, 0x0f, 0x22, 0x52, 0x84, 0x61, 0xb5, 0x73, 0xfa, 0x85, 0x6e, 0x51, - 0xd3, 0x22, 0xda, 0xd4, 0xca, 0x5d, 0x9a, 0x72, 0xda, 0x3c, 0x36, 0x0d, 0x62, 0x75, 0xfd, 0x25, - 0xbb, 0x10, 0x18, 0x07, 0x4b, 0xea, 0xa9, 0x18, 0xca, 0x51, 0x24, 0x47, 0x65, 0x48, 0xd1, 0x13, - 0x19, 0x80, 0x14, 0x3d, 0x41, 0xdf, 0x85, 0x0c, 0x9b, 0x24, 0x9f, 0x7c, 0x79, 0xc6, 0x11, 0x28, - 0xed, 0xda, 0xa7, 0x0e, 0xc1, 0x5c, 0x53, 0x55, 0x83, 0x74, 0x08, 0xd0, 0x7d, 0xd2, 0xab, 0x7a, - 0x03, 0x96, 0x27, 0xe0, 0x3b, 0xb4, 0x7e, 0xc9, 0xf0, 0xfa, 0xa9, 0xcb, 0x50, 0x8a, 0x60, 0xb5, - 0x7a, 0x11, 0x56, 0x67, 0x41, 0xaf, 0xda, 0x0f, 0xe4, 0x11, 0x08, 0x45, 0xb7, 0xa1, 0x10, 0x60, - 0xaf, 0x48, 0xc7, 0xe9, 0x58, 0xf9, 0xca, 0x38, 0x50, 0x65, 0x79, 0xc8, 0xb6, 0x35, 0xdf, 0x0f, - 0x29, 0x3e, 0xf0, 0xbc, 0xee, 0x38, 0x2d, 0xdd, 0xeb, 0xab, 0x9f, 0x42, 0x35, 0x0e, 0x57, 0x27, - 0xa6, 0x91, 0x09, 0xb6, 0xe1, 0x45, 0xc8, 0x1d, 0xd9, 0xee, 0x50, 0xa7, 0xdc, 0x59, 0x09, 0xcb, - 0x1e, 0xdb, 0x9e, 0x02, 0x63, 0xd3, 0x5c, 0x2c, 0x3a, 0xaa, 0x06, 0x97, 0x62, 0xb1, 0x95, 0x99, - 0x98, 0x96, 0x41, 0x44, 0x3c, 0x4b, 0x58, 0x74, 0xc6, 0x8e, 0xc4, 0x60, 0x45, 0x87, 0xfd, 0xac, - 0xc7, 0xe7, 0xca, 0xfd, 0x2b, 0x58, 0xf6, 0xd4, 0xdf, 0x15, 0xa0, 0x80, 0x89, 0xe7, 0x30, 0x4c, - 0x40, 0x75, 0x50, 0xc8, 0x49, 0x97, 0x38, 0xd4, 0x87, 0xd1, 0xd9, 0xac, 0x41, 0x68, 0x37, 0x7d, - 0x4d, 0x76, 0x64, 0x07, 0x66, 0xe8, 0x96, 0x64, 0x65, 0xf1, 0x04, 0x4b, 0x9a, 0x87, 0x69, 0xd9, - 0x1d, 0x9f, 0x96, 0xa5, 0x63, 0x4f, 0x69, 0x61, 0x35, 0xc1, 0xcb, 0x6e, 0x49, 0x5e, 0x96, 0x99, - 0xf3, 0x63, 0x11, 0x62, 0xd6, 0x88, 0x10, 0xb3, 0xec, 0x9c, 0x69, 0xc6, 0x30, 0xb3, 0x3b, 0x3e, - 0x33, 0xcb, 0xcd, 0x19, 0xf1, 0x04, 0x35, 0xbb, 0x1f, 0xa5, 0x66, 0x82, 0x56, 0x5d, 0x8b, 
0xb5, - 0x8e, 0xe5, 0x66, 0x3f, 0x08, 0x71, 0xb3, 0x42, 0x2c, 0x31, 0x12, 0x4e, 0x66, 0x90, 0xb3, 0x46, - 0x84, 0x9c, 0x29, 0x73, 0x62, 0x10, 0xc3, 0xce, 0x3e, 0x08, 0xb3, 0x33, 0x88, 0x25, 0x78, 0x72, - 0xbd, 0x67, 0xd1, 0xb3, 0xf7, 0x02, 0x7a, 0x56, 0x8c, 0xe5, 0x97, 0x72, 0x0e, 0x93, 0xfc, 0x6c, - 0x7f, 0x8a, 0x9f, 0x09, 0x3e, 0xf5, 0x46, 0xac, 0x8b, 0x39, 0x04, 0x6d, 0x7f, 0x8a, 0xa0, 0x95, - 0xe6, 0x38, 0x9c, 0xc3, 0xd0, 0x7e, 0x31, 0x9b, 0xa1, 0xc5, 0x73, 0x28, 0x39, 0xcc, 0xc5, 0x28, - 0x9a, 0x16, 0x43, 0xd1, 0x96, 0xb9, 0xfb, 0xb7, 0x62, 0xdd, 0x9f, 0x9f, 0xa3, 0xdd, 0x60, 0x27, - 0xe4, 0x44, 0xce, 0x33, 0x94, 0x21, 0xae, 0x6b, 0xbb, 0x92, 0x6d, 0x89, 0x8e, 0xfa, 0x26, 0x3b, - 0xb3, 0xc7, 0xf9, 0x7d, 0x06, 0x9f, 0xe3, 0x68, 0x1e, 0xca, 0x69, 0xf5, 0x0f, 0xc9, 0xb1, 0x2d, - 0x3f, 0xe6, 0xc2, 0xe7, 0xbd, 0x22, 0xcf, 0xfb, 0x10, 0xcb, 0x4b, 0x45, 0x59, 0xde, 0x3a, 0x14, - 0x19, 0x4a, 0x4f, 0x10, 0x38, 0xdd, 0x09, 0x08, 0xdc, 0x4d, 0x58, 0xe1, 0xc7, 0xb0, 0xe0, 0x82, - 0x12, 0x9a, 0x33, 0xfc, 0x84, 0x59, 0x66, 0x1f, 0xc4, 0xe6, 0x14, 0x18, 0xfd, 0x0e, 0x5c, 0x08, - 0xe9, 0x06, 0xe8, 0x2f, 0xd8, 0x4c, 0x25, 0xd0, 0xde, 0x96, 0xc7, 0xc0, 0x9f, 0x92, 0xe3, 0x08, - 0x8d, 0x99, 0xdf, 0x2c, 0x92, 0x96, 0xfc, 0x1f, 0x91, 0xb4, 0xd4, 0x7f, 0x4d, 0xd2, 0xc2, 0xa7, - 0x59, 0x3a, 0x7a, 0x9a, 0xfd, 0x2b, 0x39, 0x5e, 0x93, 0x80, 0x72, 0x75, 0x6d, 0x83, 0xc8, 0xf3, - 0x85, 0xb7, 0x51, 0x05, 0xd2, 0x03, 0xbb, 0x27, 0x4f, 0x11, 0xd6, 0x64, 0x5a, 0x01, 0x08, 0x2b, - 0x12, 0x63, 0x83, 0xa3, 0x29, 0xcb, 0x23, 0x2c, 0x8f, 0xa6, 0x0a, 0xa4, 0x9f, 0x10, 0x01, 0x99, - 0x4b, 0x98, 0x35, 0x99, 0x1e, 0xdf, 0x64, 0x1c, 0x08, 0x97, 0xb0, 0xe8, 0xa0, 0xbb, 0xa0, 0xf0, - 0x32, 0x84, 0x66, 0x3b, 0x9e, 0x44, 0xb7, 0xd7, 0xc2, 0x73, 0x15, 0xd5, 0x86, 0x8d, 0x03, 0xa6, - 0xb3, 0xef, 0x78, 0xb8, 0xe0, 0xc8, 0x56, 0xe8, 0xd4, 0x55, 0x22, 0xe4, 0xef, 0x32, 0x28, 0x6c, - 0xf4, 0x9e, 0xa3, 0x77, 0x09, 0x87, 0x2a, 0x05, 0x8f, 0x05, 0xea, 0x63, 0x40, 0xd3, 0x80, 0x8b, - 0x5a, 0x90, 0x23, 0xc7, 0xc4, 
0xa2, 0x6c, 0xd9, 0x58, 0xb8, 0x2f, 0xce, 0x60, 0x56, 0xc4, 0xa2, - 0xf5, 0x2a, 0x0b, 0xf2, 0x3f, 0xbf, 0x59, 0xaf, 0x08, 0xed, 0xb7, 0xed, 0xa1, 0x49, 0xc9, 0xd0, - 0xa1, 0xa7, 0x58, 0xda, 0xab, 0x7f, 0x4b, 0x31, 0x9a, 0x13, 0x01, 0xe3, 0x99, 0xb1, 0xf5, 0xb7, - 0x7c, 0x2a, 0x44, 0x71, 0x17, 0x8b, 0xf7, 0x1a, 0x40, 0x4f, 0xf7, 0xb4, 0xa7, 0xba, 0x45, 0x89, - 0x21, 0x83, 0x1e, 0x92, 0xa0, 0x1a, 0x14, 0x58, 0x6f, 0xe4, 0x11, 0x43, 0xb2, 0xed, 0xa0, 0x1f, - 0x9a, 0x67, 0xfe, 0xdb, 0xcd, 0x33, 0x1a, 0xe5, 0xc2, 0x44, 0x94, 0x43, 0x14, 0x44, 0x09, 0x53, - 0x10, 0x36, 0x36, 0xc7, 0x35, 0x6d, 0xd7, 0xa4, 0xa7, 0x7c, 0x69, 0xd2, 0x38, 0xe8, 0xb3, 0xcb, - 0xdb, 0x90, 0x0c, 0x1d, 0xdb, 0x1e, 0x68, 0x02, 0x6e, 0x8a, 0xdc, 0x74, 0x49, 0x0a, 0x9b, 0x1c, - 0x75, 0x7e, 0x9d, 0x1a, 0xe7, 0xdf, 0x98, 0x6a, 0xfe, 0xdf, 0x05, 0x58, 0xfd, 0x0d, 0xbf, 0x80, - 0x46, 0x8f, 0x5b, 0x74, 0x08, 0x2b, 0x41, 0xfa, 0x6b, 0x23, 0x0e, 0x0b, 0xfe, 0x86, 0x5e, 0x14, - 0x3f, 0x2a, 0xc7, 0x51, 0xb1, 0x87, 0x3e, 0x86, 0x57, 0x27, 0xb0, 0x2d, 0x70, 0x9d, 0x5a, 0x14, - 0xe2, 0x5e, 0x89, 0x42, 0x9c, 0xef, 0x7a, 0x1c, 0xac, 0xf4, 0xb7, 0xcc, 0xba, 0x1d, 0x76, 0xa7, - 0x09, 0xb3, 0x87, 0x99, 0xcb, 0x7f, 0x0d, 0x4a, 0x2e, 0xa1, 0xec, 0x9e, 0x1d, 0xb9, 0x35, 0x2e, - 0x09, 0xa1, 0xbc, 0x8b, 0x1e, 0xc0, 0x2b, 0x33, 0x59, 0x04, 0xfa, 0x1e, 0x28, 0x63, 0x02, 0x92, - 0x8c, 0xb9, 0x80, 0x05, 0x97, 0x8a, 0xb1, 0xae, 0xfa, 0xc7, 0xe4, 0xd8, 0x65, 0xf4, 0x9a, 0xd2, - 0x84, 0x9c, 0x4b, 0xbc, 0xd1, 0x40, 0x5c, 0x1c, 0xca, 0x5b, 0xef, 0x2c, 0xc6, 0x3f, 0x98, 0x74, - 0x34, 0xa0, 0x58, 0x1a, 0xab, 0x8f, 0x21, 0x27, 0x24, 0xa8, 0x08, 0xf9, 0x87, 0x7b, 0x0f, 0xf6, - 0xf6, 0x3f, 0xda, 0xab, 0x24, 0x10, 0x40, 0x6e, 0xbb, 0xd1, 0x68, 0x1e, 0xb4, 0x2b, 0x49, 0xa4, - 0x40, 0x76, 0xbb, 0xbe, 0x8f, 0xdb, 0x95, 0x14, 0x13, 0xe3, 0xe6, 0x87, 0xcd, 0x46, 0xbb, 0x92, - 0x46, 0x2b, 0x50, 0x12, 0x6d, 0xed, 0xfe, 0x3e, 0xfe, 0xc9, 0x76, 0xbb, 0x92, 0x09, 0x89, 0x0e, - 0x9b, 0x7b, 0xf7, 0x9a, 0xb8, 0x92, 0x55, 0xdf, 0x65, 0x37, 0x93, 
0x18, 0xc6, 0x32, 0xbe, 0x83, - 0x24, 0x43, 0x77, 0x10, 0xf5, 0xb7, 0x29, 0xa8, 0xc5, 0xd3, 0x10, 0xf4, 0xe1, 0xc4, 0xc4, 0xb7, - 0xce, 0xc1, 0x61, 0x26, 0x66, 0x8f, 0xae, 0x43, 0xd9, 0x25, 0x47, 0x84, 0x76, 0xfb, 0x82, 0x16, - 0x89, 0x23, 0xb3, 0x84, 0x4b, 0x52, 0xca, 0x8d, 0x3c, 0xa1, 0xf6, 0x19, 0xe9, 0x52, 0x4d, 0x60, - 0x91, 0xd8, 0x74, 0x0a, 0x53, 0x63, 0xd2, 0x43, 0x21, 0x54, 0x3f, 0x3d, 0x57, 0x2c, 0x15, 0xc8, - 0xe2, 0x66, 0x1b, 0x7f, 0x5c, 0x49, 0x23, 0x04, 0x65, 0xde, 0xd4, 0x0e, 0xf7, 0xb6, 0x0f, 0x0e, - 0x5b, 0xfb, 0x2c, 0x96, 0x17, 0x60, 0xd9, 0x8f, 0xa5, 0x2f, 0xcc, 0xaa, 0x9f, 0x40, 0x39, 0x7a, - 0xf7, 0x67, 0x21, 0x74, 0xed, 0x91, 0x65, 0xf0, 0x60, 0x64, 0xb1, 0xe8, 0xa0, 0xdb, 0x90, 0x3d, - 0xb6, 0x45, 0x9a, 0xcd, 0xde, 0x6b, 0x8f, 0x6c, 0x4a, 0x42, 0xb5, 0x03, 0xa1, 0xad, 0x7e, 0x01, - 0x59, 0x9e, 0x35, 0x2c, 0x03, 0xf8, 0x2d, 0x5e, 0x92, 0x2a, 0xd6, 0x46, 0x9f, 0x00, 0xe8, 0x94, - 0xba, 0x66, 0x67, 0x34, 0x76, 0xbc, 0x3e, 0x3b, 0xeb, 0xb6, 0x7d, 0xbd, 0xfa, 0x65, 0x99, 0x7e, - 0xab, 0x63, 0xd3, 0x50, 0x0a, 0x86, 0x1c, 0xaa, 0x7b, 0x50, 0x8e, 0xda, 0xfa, 0x34, 0x40, 0x8c, - 0x21, 0x4a, 0x03, 0x04, 0xab, 0x93, 0x34, 0x20, 0x20, 0x11, 0x69, 0x51, 0xb1, 0xe1, 0x1d, 0xf5, - 0x59, 0x12, 0x0a, 0xed, 0x13, 0xb9, 0x1e, 0x31, 0xc5, 0x82, 0xb1, 0x69, 0x2a, 0x7c, 0x35, 0x16, - 0xd5, 0x87, 0x74, 0x50, 0xd3, 0xf8, 0x20, 0xd8, 0x71, 0x99, 0x45, 0x6f, 0x40, 0x7e, 0x71, 0x47, - 0x66, 0xd9, 0xfb, 0xa0, 0x04, 0x98, 0xc9, 0xd8, 0xa9, 0x6e, 0x18, 0x2e, 0xf1, 0x3c, 0xb9, 0xef, - 0xfd, 0x2e, 0xaf, 0x3d, 0xd9, 0x4f, 0xe5, 0xe5, 0x3b, 0x8d, 0x45, 0x47, 0x35, 0x60, 0x79, 0x02, - 0x70, 0xd1, 0xfb, 0x90, 0x77, 0x46, 0x1d, 0xcd, 0x0f, 0xcf, 0xc4, 0x5b, 0x83, 0xcf, 0x7b, 0x46, - 0x9d, 0x81, 0xd9, 0x7d, 0x40, 0x4e, 0xfd, 0xc1, 0x38, 0xa3, 0xce, 0x03, 0x11, 0x45, 0xf1, 0x2b, - 0xa9, 0xf0, 0xaf, 0x1c, 0x43, 0xc1, 0xdf, 0x14, 0xe8, 0x87, 0xa0, 0x04, 0x58, 0x1e, 0x94, 0x24, - 0x63, 0x0f, 0x01, 0xe9, 0x7e, 0x6c, 0xc2, 0x48, 0xb4, 0x67, 0xf6, 0x2c, 0x62, 0x68, 0x63, 0x7e, - 0xcc, 
0x7f, 0xad, 0x80, 0x97, 0xc5, 0x87, 0x5d, 0x9f, 0x1c, 0xab, 0xff, 0x4e, 0x42, 0xc1, 0x2f, - 0x3d, 0xa1, 0x77, 0x43, 0xfb, 0xae, 0x3c, 0xe3, 0xa2, 0xee, 0x2b, 0x8e, 0xcb, 0x47, 0xd1, 0xb1, - 0xa6, 0xce, 0x3f, 0xd6, 0xb8, 0x3a, 0xa0, 0x5f, 0x91, 0xcd, 0x9c, 0xbb, 0x22, 0xfb, 0x36, 0x20, - 0x6a, 0x53, 0x7d, 0xa0, 0x1d, 0xdb, 0xd4, 0xb4, 0x7a, 0x9a, 0x08, 0xb6, 0xe0, 0x02, 0x15, 0xfe, - 0xe5, 0x11, 0xff, 0x70, 0xc0, 0xe3, 0xfe, 0xcb, 0x24, 0x14, 0x02, 0x50, 0x3f, 0x6f, 0x35, 0xe8, - 0x22, 0xe4, 0x24, 0x6e, 0x89, 0x72, 0x90, 0xec, 0x05, 0x85, 0xc9, 0x4c, 0xa8, 0x30, 0x59, 0x83, - 0xc2, 0x90, 0x50, 0x9d, 0x9f, 0x6c, 0xe2, 0x8a, 0x12, 0xf4, 0x6f, 0xbe, 0x07, 0xc5, 0x50, 0x61, - 0x8e, 0x65, 0xde, 0x5e, 0xf3, 0xa3, 0x4a, 0xa2, 0x96, 0x7f, 0xf6, 0xe5, 0xd5, 0xf4, 0x1e, 0x79, - 0xca, 0xf6, 0x2c, 0x6e, 0x36, 0x5a, 0xcd, 0xc6, 0x83, 0x4a, 0xb2, 0x56, 0x7c, 0xf6, 0xe5, 0xd5, - 0x3c, 0x26, 0xbc, 0x48, 0x70, 0xb3, 0x05, 0x4b, 0xe1, 0x55, 0x89, 0x42, 0x1f, 0x82, 0xf2, 0xbd, - 0x87, 0x07, 0xbb, 0x3b, 0x8d, 0xed, 0x76, 0x53, 0x7b, 0xb4, 0xdf, 0x6e, 0x56, 0x92, 0xe8, 0x55, - 0xb8, 0xb0, 0xbb, 0xf3, 0xe3, 0x56, 0x5b, 0x6b, 0xec, 0xee, 0x34, 0xf7, 0xda, 0xda, 0x76, 0xbb, - 0xbd, 0xdd, 0x78, 0x50, 0x49, 0x6d, 0xfd, 0x5e, 0x81, 0xe5, 0xed, 0x7a, 0x63, 0x87, 0xc1, 0xb6, - 0xd9, 0xd5, 0xf9, 0xfd, 0xb1, 0x01, 0x19, 0x7e, 0x43, 0x3c, 0xf3, 0xd9, 0xae, 0x76, 0x76, 0xf9, - 0x08, 0xdd, 0x87, 0x2c, 0xbf, 0x3c, 0xa2, 0xb3, 0xdf, 0xf1, 0x6a, 0x73, 0xea, 0x49, 0x6c, 0x30, - 0x3c, 0x3d, 0xce, 0x7c, 0xd8, 0xab, 0x9d, 0x5d, 0x5e, 0x42, 0x18, 0x94, 0x31, 0xf9, 0x9c, 0xff, - 0xd0, 0x55, 0x5b, 0x00, 0x6c, 0xd0, 0x2e, 0xe4, 0xfd, 0xfb, 0xc2, 0xbc, 0xa7, 0xb7, 0xda, 0xdc, - 0xfa, 0x0f, 0x0b, 0x97, 0xb8, 0xd7, 0x9d, 0xfd, 0x8e, 0x58, 0x9b, 0x53, 0xcc, 0x42, 0x3b, 0x90, - 0x93, 0x84, 0x6a, 0xce, 0x73, 0x5a, 0x6d, 0x5e, 0x3d, 0x87, 0x05, 0x6d, 0x7c, 0x63, 0x9e, 0xff, - 0x3a, 0x5a, 0x5b, 0xa0, 0x4e, 0x87, 0x1e, 0x02, 0x84, 0x6e, 0x71, 0x0b, 0x3c, 0x7b, 0xd6, 0x16, - 0xa9, 0xbf, 0xa1, 0x7d, 0x28, 0x04, 0xa4, 
0x7a, 0xee, 0x23, 0x64, 0x6d, 0x7e, 0x21, 0x0c, 0x3d, - 0x86, 0x52, 0x94, 0x4c, 0x2e, 0xf6, 0xb4, 0x58, 0x5b, 0xb0, 0xc2, 0xc5, 0xfc, 0x47, 0x99, 0xe5, - 0x62, 0x4f, 0x8d, 0xb5, 0x05, 0x0b, 0x5e, 0xe8, 0x33, 0x58, 0x99, 0x66, 0x7e, 0x8b, 0xbf, 0x3c, - 0xd6, 0xce, 0x51, 0x02, 0x43, 0x43, 0x40, 0x33, 0x18, 0xe3, 0x39, 0x1e, 0x22, 0x6b, 0xe7, 0xa9, - 0x88, 0xd5, 0x9b, 0x5f, 0xbd, 0x58, 0x4b, 0x7e, 0xfd, 0x62, 0x2d, 0xf9, 0x8f, 0x17, 0x6b, 0xc9, - 0xe7, 0x2f, 0xd7, 0x12, 0x5f, 0xbf, 0x5c, 0x4b, 0xfc, 0xf5, 0xe5, 0x5a, 0xe2, 0x67, 0x6f, 0xf5, - 0x4c, 0xda, 0x1f, 0x75, 0x36, 0xba, 0xf6, 0x70, 0x33, 0xfc, 0x0f, 0x87, 0x59, 0xff, 0xba, 0xe8, - 0xe4, 0xf8, 0xa1, 0x72, 0xeb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92, 0xa5, 0x39, 0xcc, 0x95, - 0x21, 0x00, 0x00, + // 2856 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7, + 0xf1, 0xc7, 0x93, 0x04, 0x1a, 0x4f, 0x8e, 0x64, 0x19, 0x5e, 0xcb, 0xa4, 0xbc, 0x2a, 0xdb, 0xb2, + 0x6c, 0x93, 0x7f, 0xd3, 0x25, 0xff, 0xed, 0x72, 0x1e, 0x26, 0x20, 0xc8, 0xa0, 0xc5, 0x90, 0xcc, + 0x10, 0x92, 0xcb, 0x49, 0xac, 0xf5, 0x02, 0x3b, 0x04, 0xd6, 0x02, 0x76, 0xd7, 0xbb, 0x03, 0x8a, + 0xd4, 0x31, 0x95, 0x5c, 0x5c, 0x39, 0xf8, 0x98, 0x8b, 0x3f, 0x42, 0x2a, 0xd7, 0x9c, 0x72, 0xca, + 0xc1, 0x87, 0x54, 0xca, 0xc7, 0x1c, 0x52, 0x4e, 0xca, 0xba, 0xe5, 0x0b, 0xa4, 0x52, 0xa9, 0x54, + 0xa5, 0xe6, 0xb1, 0x2f, 0x00, 0x4b, 0x80, 0x76, 0x6e, 0xb9, 0xcd, 0xf4, 0x76, 0xf7, 0xce, 0xf4, + 0xcc, 0xfe, 0xfa, 0x37, 0x3d, 0x0b, 0xcf, 0x52, 0x62, 0x19, 0xc4, 0x1d, 0x9b, 0x16, 0xdd, 0xd2, + 0x7b, 0x7d, 0x73, 0x8b, 0x9e, 0x39, 0xc4, 0xdb, 0x74, 0x5c, 0x9b, 0xda, 0xa8, 0x16, 0x3e, 0xdc, + 0x64, 0x0f, 0x95, 0xe7, 0x22, 0xda, 0x7d, 0xf7, 0xcc, 0xa1, 0xf6, 0x96, 0xe3, 0xda, 0xf6, 0xb1, + 0xd0, 0x57, 0xae, 0x46, 0x1e, 0x73, 0x3f, 0x51, 0x6f, 0xb1, 0xa7, 0xd2, 0xf8, 0x21, 0x39, 0xf3, + 0x9f, 0x3e, 0x37, 0x63, 0xeb, 0xe8, 0xae, 0x3e, 0xf6, 0x1f, 0x6f, 0x0c, 0x6c, 0x7b, 0x30, 0x22, + 0x5b, 0xbc, 
0xd7, 0x9b, 0x1c, 0x6f, 0x51, 0x73, 0x4c, 0x3c, 0xaa, 0x8f, 0x1d, 0xa9, 0x70, 0x79, + 0x60, 0x0f, 0x6c, 0xde, 0xdc, 0x62, 0x2d, 0x21, 0x55, 0xff, 0x59, 0x80, 0x55, 0x4c, 0x3e, 0x9d, + 0x10, 0x8f, 0xa2, 0x6d, 0xc8, 0x91, 0xfe, 0xd0, 0x6e, 0xa4, 0xaf, 0xa5, 0x6f, 0x94, 0xb6, 0xaf, + 0x6e, 0x4e, 0x4d, 0x6e, 0x53, 0xea, 0xb5, 0xfb, 0x43, 0xbb, 0x93, 0xc2, 0x5c, 0x17, 0xdd, 0x82, + 0xfc, 0xf1, 0x68, 0xe2, 0x0d, 0x1b, 0x19, 0x6e, 0xf4, 0x5c, 0x92, 0xd1, 0x1d, 0xa6, 0xd4, 0x49, + 0x61, 0xa1, 0xcd, 0x5e, 0x65, 0x5a, 0xc7, 0x76, 0x23, 0x7b, 0xfe, 0xab, 0x76, 0xad, 0x63, 0xfe, + 0x2a, 0xa6, 0x8b, 0x9a, 0x00, 0xa6, 0x65, 0x52, 0xad, 0x3f, 0xd4, 0x4d, 0xab, 0x91, 0xe3, 0x96, + 0xcf, 0x27, 0x5b, 0x9a, 0xb4, 0xc5, 0x14, 0x3b, 0x29, 0x5c, 0x34, 0xfd, 0x0e, 0x1b, 0xee, 0xa7, + 0x13, 0xe2, 0x9e, 0x35, 0xf2, 0xe7, 0x0f, 0xf7, 0xc7, 0x4c, 0x89, 0x0d, 0x97, 0x6b, 0xa3, 0x36, + 0x94, 0x7a, 0x64, 0x60, 0x5a, 0x5a, 0x6f, 0x64, 0xf7, 0x1f, 0x36, 0x56, 0xb8, 0xb1, 0x9a, 0x64, + 0xdc, 0x64, 0xaa, 0x4d, 0xa6, 0xd9, 0x49, 0x61, 0xe8, 0x05, 0x3d, 0xf4, 0x3d, 0x28, 0xf4, 0x87, + 0xa4, 0xff, 0x50, 0xa3, 0xa7, 0x8d, 0x55, 0xee, 0x63, 0x23, 0xc9, 0x47, 0x8b, 0xe9, 0x75, 0x4f, + 0x3b, 0x29, 0xbc, 0xda, 0x17, 0x4d, 0x36, 0x7f, 0x83, 0x8c, 0xcc, 0x13, 0xe2, 0x32, 0xfb, 0xc2, + 0xf9, 0xf3, 0xbf, 0x2d, 0x34, 0xb9, 0x87, 0xa2, 0xe1, 0x77, 0xd0, 0x0f, 0xa1, 0x48, 0x2c, 0x43, + 0x4e, 0xa3, 0xc8, 0x5d, 0x5c, 0x4b, 0x5c, 0x67, 0xcb, 0xf0, 0x27, 0x51, 0x20, 0xb2, 0x8d, 0xde, + 0x82, 0x95, 0xbe, 0x3d, 0x1e, 0x9b, 0xb4, 0x01, 0xdc, 0x7a, 0x3d, 0x71, 0x02, 0x5c, 0xab, 0x93, + 0xc2, 0x52, 0x1f, 0xed, 0x43, 0x75, 0x64, 0x7a, 0x54, 0xf3, 0x2c, 0xdd, 0xf1, 0x86, 0x36, 0xf5, + 0x1a, 0x25, 0xee, 0xe1, 0x85, 0x24, 0x0f, 0x7b, 0xa6, 0x47, 0x8f, 0x7c, 0xe5, 0x4e, 0x0a, 0x57, + 0x46, 0x51, 0x01, 0xf3, 0x67, 0x1f, 0x1f, 0x13, 0x37, 0x70, 0xd8, 0x28, 0x9f, 0xef, 0xef, 0x80, + 0x69, 0xfb, 0xf6, 0xcc, 0x9f, 0x1d, 0x15, 0xa0, 0x9f, 0xc2, 0xa5, 0x91, 0xad, 0x1b, 0x81, 0x3b, + 0xad, 0x3f, 0x9c, 0x58, 0x0f, 0x1b, 0x15, 0xee, 
0xf4, 0xe5, 0xc4, 0x41, 0xda, 0xba, 0xe1, 0xbb, + 0x68, 0x31, 0x83, 0x4e, 0x0a, 0xaf, 0x8d, 0xa6, 0x85, 0xe8, 0x01, 0x5c, 0xd6, 0x1d, 0x67, 0x74, + 0x36, 0xed, 0xbd, 0xca, 0xbd, 0xdf, 0x4c, 0xf2, 0xbe, 0xc3, 0x6c, 0xa6, 0xdd, 0x23, 0x7d, 0x46, + 0x8a, 0xba, 0x50, 0x77, 0x5c, 0xe2, 0xe8, 0x2e, 0xd1, 0x1c, 0xd7, 0x76, 0x6c, 0x4f, 0x1f, 0x35, + 0x6a, 0xdc, 0xf7, 0x4b, 0x49, 0xbe, 0x0f, 0x85, 0xfe, 0xa1, 0x54, 0xef, 0xa4, 0x70, 0xcd, 0x89, + 0x8b, 0x84, 0x57, 0xbb, 0x4f, 0x3c, 0x2f, 0xf4, 0x5a, 0x5f, 0xe4, 0x95, 0xeb, 0xc7, 0xbd, 0xc6, + 0x44, 0xcd, 0x55, 0xc8, 0x9f, 0xe8, 0xa3, 0x09, 0x51, 0x5f, 0x82, 0x52, 0x04, 0x52, 0x50, 0x03, + 0x56, 0xc7, 0xc4, 0xf3, 0xf4, 0x01, 0xe1, 0x08, 0x54, 0xc4, 0x7e, 0x57, 0xad, 0x42, 0x39, 0x0a, + 0x23, 0xea, 0xe7, 0xe9, 0xc0, 0x92, 0x21, 0x04, 0xb3, 0x3c, 0x21, 0xae, 0x67, 0xda, 0x96, 0x6f, + 0x29, 0xbb, 0xe8, 0x3a, 0x54, 0xf8, 0x5e, 0xd7, 0xfc, 0xe7, 0x0c, 0xa6, 0x72, 0xb8, 0xcc, 0x85, + 0xf7, 0xa5, 0xd2, 0x06, 0x94, 0x9c, 0x6d, 0x27, 0x50, 0xc9, 0x72, 0x15, 0x70, 0xb6, 0x1d, 0x5f, + 0xe1, 0x79, 0x28, 0xb3, 0x39, 0x06, 0x1a, 0x39, 0xfe, 0x92, 0x12, 0x93, 0x49, 0x15, 0xf5, 0x8f, + 0x19, 0xa8, 0x4f, 0x43, 0x0f, 0x7a, 0x0b, 0x72, 0x0c, 0x85, 0x25, 0xa0, 0x2a, 0x9b, 0x02, 0xa2, + 0x37, 0x7d, 0x88, 0xde, 0xec, 0xfa, 0x10, 0xdd, 0x2c, 0x7c, 0xf9, 0xf5, 0x46, 0xea, 0xf3, 0xbf, + 0x6e, 0xa4, 0x31, 0xb7, 0x40, 0xcf, 0x30, 0xa4, 0xd0, 0x4d, 0x4b, 0x33, 0x0d, 0x3e, 0xe4, 0x22, + 0x83, 0x01, 0xdd, 0xb4, 0x76, 0x0d, 0xb4, 0x07, 0xf5, 0xbe, 0x6d, 0x79, 0xc4, 0xf2, 0x26, 0x9e, + 0x26, 0x52, 0x80, 0x84, 0xd1, 0x18, 0x18, 0x88, 0xc4, 0xd2, 0xf2, 0x35, 0x0f, 0xb9, 0x22, 0xae, + 0xf5, 0xe3, 0x02, 0x74, 0x07, 0xe0, 0x44, 0x1f, 0x99, 0x86, 0x4e, 0x6d, 0xd7, 0x6b, 0xe4, 0xae, + 0x65, 0xe7, 0x22, 0xc2, 0x7d, 0x5f, 0xe5, 0x9e, 0x63, 0xe8, 0x94, 0x34, 0x73, 0x6c, 0xb8, 0x38, + 0x62, 0x89, 0x5e, 0x84, 0x9a, 0xee, 0x38, 0x9a, 0x47, 0x75, 0x4a, 0xb4, 0xde, 0x19, 0x25, 0x1e, + 0x87, 0xd8, 0x32, 0xae, 0xe8, 0x8e, 0x73, 0xc4, 0xa4, 0x4d, 0x26, 0x44, 0x2f, 0x40, 
0x95, 0xa1, + 0xb1, 0xa9, 0x8f, 0xb4, 0x21, 0x31, 0x07, 0x43, 0xca, 0xc1, 0x34, 0x8b, 0x2b, 0x52, 0xda, 0xe1, + 0x42, 0xd5, 0x08, 0x56, 0x9c, 0x23, 0x31, 0x42, 0x90, 0x33, 0x74, 0xaa, 0xf3, 0x48, 0x96, 0x31, + 0x6f, 0x33, 0x99, 0xa3, 0xd3, 0xa1, 0x8c, 0x0f, 0x6f, 0xa3, 0x2b, 0xb0, 0x22, 0xdd, 0x66, 0xb9, + 0x5b, 0xd9, 0x43, 0x97, 0x21, 0xef, 0xb8, 0xf6, 0x09, 0xe1, 0x4b, 0x57, 0xc0, 0xa2, 0xa3, 0xfe, + 0x22, 0x03, 0x6b, 0x33, 0x98, 0xcd, 0xfc, 0x0e, 0x75, 0x6f, 0xe8, 0xbf, 0x8b, 0xb5, 0xd1, 0x9b, + 0xcc, 0xaf, 0x6e, 0x10, 0x57, 0xe6, 0xb9, 0xc6, 0x6c, 0xa8, 0x3b, 0xfc, 0xb9, 0x0c, 0x8d, 0xd4, + 0x46, 0x07, 0x50, 0x1f, 0xe9, 0x1e, 0xd5, 0x04, 0x06, 0x6a, 0x91, 0x9c, 0x37, 0x8b, 0xfc, 0x7b, + 0xba, 0x8f, 0x9a, 0x6c, 0x53, 0x4b, 0x47, 0xd5, 0x51, 0x4c, 0x8a, 0x30, 0x5c, 0xee, 0x9d, 0x3d, + 0xd6, 0x2d, 0x6a, 0x5a, 0x44, 0x9b, 0x59, 0xb9, 0x67, 0x66, 0x9c, 0xb6, 0x4f, 0x4c, 0x83, 0x58, + 0x7d, 0x7f, 0xc9, 0x2e, 0x05, 0xc6, 0xc1, 0x92, 0x7a, 0x2a, 0x86, 0x6a, 0x3c, 0xeb, 0xa0, 0x2a, + 0x64, 0xe8, 0xa9, 0x0c, 0x40, 0x86, 0x9e, 0xa2, 0xff, 0x83, 0x1c, 0x9b, 0x24, 0x9f, 0x7c, 0x75, + 0x4e, 0xba, 0x96, 0x76, 0xdd, 0x33, 0x87, 0x60, 0xae, 0xa9, 0xaa, 0xc1, 0xe7, 0x10, 0x64, 0xa2, + 0x69, 0xaf, 0xea, 0xcb, 0x50, 0x9b, 0x4a, 0x35, 0x91, 0xf5, 0x4b, 0x47, 0xd7, 0x4f, 0xad, 0x41, + 0x25, 0x96, 0x57, 0xd4, 0x2b, 0x70, 0x79, 0x5e, 0x9a, 0x50, 0x87, 0x81, 0x3c, 0x06, 0xf7, 0xe8, + 0x16, 0x14, 0x82, 0x3c, 0x21, 0x3e, 0xc7, 0xd9, 0x58, 0xf9, 0xca, 0x38, 0x50, 0x65, 0xdf, 0x21, + 0xdb, 0xd6, 0x7c, 0x3f, 0x64, 0xf8, 0xc0, 0x57, 0x75, 0xc7, 0xe9, 0xe8, 0xde, 0x50, 0xfd, 0x18, + 0x1a, 0x49, 0x39, 0x60, 0x6a, 0x1a, 0xb9, 0x60, 0x1b, 0x5e, 0x81, 0x95, 0x63, 0xdb, 0x1d, 0xeb, + 0x94, 0x3b, 0xab, 0x60, 0xd9, 0x63, 0xdb, 0x53, 0xe4, 0x83, 0x2c, 0x17, 0x8b, 0x8e, 0xaa, 0xc1, + 0x33, 0x89, 0x79, 0x80, 0x99, 0x98, 0x96, 0x41, 0x44, 0x3c, 0x2b, 0x58, 0x74, 0x42, 0x47, 0x62, + 0xb0, 0xa2, 0xc3, 0x5e, 0xeb, 0xf1, 0xb9, 0x72, 0xff, 0x45, 0x2c, 0x7b, 0xea, 0x23, 0xb8, 0x32, + 0x3f, 0x19, 0xa0, 0x5b, 
0x00, 0x02, 0x37, 0x83, 0xaf, 0xae, 0xb4, 0x7d, 0x65, 0x76, 0xcf, 0xdf, + 0xd6, 0xa9, 0x8e, 0x8b, 0x5c, 0x93, 0x35, 0x19, 0x0a, 0x84, 0x66, 0x9a, 0x67, 0x3e, 0x16, 0x5b, + 0x26, 0x8b, 0x2b, 0x81, 0xce, 0x91, 0xf9, 0x98, 0xa8, 0xbd, 0xc8, 0x8b, 0x63, 0xc9, 0x21, 0xf2, + 0xa1, 0xa5, 0x2f, 0xf4, 0xa1, 0xd5, 0x21, 0x4b, 0x4f, 0xbd, 0x46, 0xe6, 0x5a, 0xf6, 0x46, 0x19, + 0xb3, 0xa6, 0xfa, 0xa7, 0x22, 0x14, 0x30, 0xf1, 0x1c, 0x06, 0x78, 0xa8, 0x09, 0x45, 0x72, 0xda, + 0x27, 0x0e, 0xf5, 0x73, 0xc4, 0x7c, 0xfa, 0x26, 0xb4, 0xdb, 0xbe, 0x26, 0xe3, 0x4e, 0x81, 0x19, + 0x7a, 0x43, 0xd2, 0xe3, 0x64, 0xa6, 0x2b, 0xcd, 0xa3, 0xfc, 0xf8, 0x4d, 0x9f, 0x1f, 0x67, 0x13, + 0xe9, 0x92, 0xb0, 0x9a, 0x22, 0xc8, 0x6f, 0x48, 0x82, 0x9c, 0x5b, 0xf0, 0xb2, 0x18, 0x43, 0x6e, + 0xc5, 0x18, 0x72, 0x7e, 0xc1, 0x34, 0x13, 0x28, 0xf2, 0x9b, 0x3e, 0x45, 0x5e, 0x59, 0x30, 0xe2, + 0x29, 0x8e, 0x7c, 0x27, 0xce, 0x91, 0x05, 0xbf, 0xbd, 0x9e, 0x68, 0x9d, 0x48, 0x92, 0xbf, 0x1f, + 0x21, 0xc9, 0x85, 0x44, 0x86, 0x2a, 0x9c, 0xcc, 0x61, 0xc9, 0xad, 0x18, 0x4b, 0x2e, 0x2e, 0x88, + 0x41, 0x02, 0x4d, 0x7e, 0x37, 0x4a, 0x93, 0x21, 0x91, 0x69, 0xcb, 0xf5, 0x9e, 0xc7, 0x93, 0xdf, + 0x0e, 0x78, 0x72, 0x29, 0x91, 0xe8, 0xcb, 0x39, 0x4c, 0x13, 0xe5, 0x83, 0x19, 0xa2, 0x2c, 0x88, + 0xed, 0x8b, 0x89, 0x2e, 0x16, 0x30, 0xe5, 0x83, 0x19, 0xa6, 0x5c, 0x59, 0xe0, 0x70, 0x01, 0x55, + 0xfe, 0xd9, 0x7c, 0xaa, 0x9c, 0x4c, 0x66, 0xe5, 0x30, 0x97, 0xe3, 0xca, 0x5a, 0x02, 0x57, 0x16, + 0x7c, 0xf6, 0x95, 0x44, 0xf7, 0x4b, 0x93, 0xe5, 0x7b, 0x73, 0xc8, 0xb2, 0xa0, 0xb5, 0x37, 0x12, + 0x9d, 0x2f, 0xc1, 0x96, 0xef, 0xcd, 0x61, 0xcb, 0x6b, 0x0b, 0xdd, 0x2e, 0x4f, 0x97, 0x5f, 0x66, + 0x64, 0x65, 0x0a, 0xa1, 0x18, 0xe0, 0x13, 0xd7, 0xb5, 0x5d, 0x49, 0x7c, 0x45, 0x47, 0xbd, 0xc1, + 0xe8, 0x53, 0x88, 0x46, 0xe7, 0x50, 0x6b, 0x9e, 0x58, 0x23, 0x08, 0xa4, 0xfe, 0x2e, 0x1d, 0xda, + 0x72, 0xc6, 0x11, 0xa5, 0x5e, 0x45, 0x49, 0xbd, 0x22, 0x84, 0x3b, 0x13, 0x27, 0xdc, 0x1b, 0x50, + 0x62, 0x09, 0x73, 0x8a, 0x4b, 0xeb, 0x4e, 0xc0, 0xa5, 0x6f, 
0xc2, 0x1a, 0x67, 0x44, 0x22, 0x4f, + 0xc8, 0x2c, 0x99, 0xe3, 0x49, 0xa2, 0xc6, 0x1e, 0x88, 0x4f, 0x49, 0xa4, 0xcb, 0xd7, 0xe0, 0x52, + 0x44, 0x37, 0x48, 0xc4, 0x82, 0x58, 0xd6, 0x03, 0xed, 0x1d, 0x99, 0x91, 0xff, 0x90, 0x0e, 0x23, + 0x14, 0x92, 0xf0, 0x79, 0x7c, 0x39, 0xfd, 0x5f, 0xe2, 0xcb, 0x99, 0x6f, 0xcd, 0x97, 0xa3, 0xc4, + 0x22, 0x1b, 0x27, 0x16, 0xff, 0x48, 0x87, 0x6b, 0x12, 0xb0, 0xdf, 0xbe, 0x6d, 0x10, 0x99, 0xea, + 0x79, 0x9b, 0x25, 0xbc, 0x91, 0x3d, 0x90, 0x09, 0x9d, 0x35, 0x99, 0x56, 0x90, 0x32, 0x8a, 0x32, + 0x23, 0x04, 0x2c, 0x21, 0xcf, 0x23, 0x2c, 0x59, 0x42, 0x1d, 0xb2, 0x0f, 0x89, 0x00, 0xf8, 0x32, + 0x66, 0x4d, 0xa6, 0xc7, 0x37, 0x19, 0x87, 0xed, 0x32, 0x16, 0x1d, 0xf4, 0x16, 0x14, 0x79, 0xf5, + 0x4a, 0xb3, 0x1d, 0x4f, 0x62, 0xf1, 0xb3, 0xd1, 0xb9, 0x8a, 0x22, 0xd5, 0xe6, 0x21, 0xd3, 0x39, + 0x70, 0x3c, 0x5c, 0x70, 0x64, 0x2b, 0x42, 0x80, 0x8a, 0x31, 0x1e, 0x7e, 0x15, 0x8a, 0x6c, 0xf4, + 0x9e, 0xa3, 0xf7, 0x09, 0x07, 0xd6, 0x22, 0x0e, 0x05, 0xea, 0x03, 0x40, 0xb3, 0xe9, 0x01, 0x75, + 0x60, 0x85, 0x9c, 0x10, 0x8b, 0xb2, 0x65, 0xcb, 0x4e, 0xf3, 0x10, 0x49, 0x72, 0x89, 0x45, 0x9b, + 0x0d, 0x16, 0xe4, 0xbf, 0x7f, 0xbd, 0x51, 0x17, 0xda, 0xaf, 0xda, 0x63, 0x93, 0x92, 0xb1, 0x43, + 0xcf, 0xb0, 0xb4, 0x57, 0xff, 0x92, 0x61, 0x8c, 0x33, 0x96, 0x3a, 0xe6, 0xc6, 0xd6, 0xdf, 0xf2, + 0x99, 0xc8, 0x69, 0x63, 0xb9, 0x78, 0xaf, 0x03, 0x0c, 0x74, 0x4f, 0x7b, 0xa4, 0x5b, 0x94, 0x18, + 0x32, 0xe8, 0x11, 0x09, 0x52, 0xa0, 0xc0, 0x7a, 0x13, 0x8f, 0x18, 0xf2, 0xe0, 0x13, 0xf4, 0x23, + 0xf3, 0x5c, 0xfd, 0x6e, 0xf3, 0x8c, 0x47, 0xb9, 0x30, 0x15, 0xe5, 0x08, 0x1b, 0x2c, 0x46, 0xd9, + 0x20, 0x1b, 0x9b, 0xe3, 0x9a, 0xb6, 0x6b, 0xd2, 0x33, 0xbe, 0x34, 0x59, 0x1c, 0xf4, 0xd9, 0x39, + 0x7a, 0x4c, 0xc6, 0x8e, 0x6d, 0x8f, 0x34, 0x01, 0x37, 0x25, 0x6e, 0x5a, 0x96, 0xc2, 0x36, 0x47, + 0x9d, 0x5f, 0x66, 0xc2, 0xef, 0x2f, 0x64, 0xfd, 0xff, 0x73, 0x01, 0x56, 0x7f, 0xc5, 0x6b, 0x01, + 0x71, 0x72, 0x80, 0x8e, 0x60, 0x2d, 0xf8, 0xfc, 0xb5, 0x09, 0x87, 0x05, 0x7f, 0x43, 0x2f, 0x8b, + 
0x1f, 0xf5, 0x93, 0xb8, 0xd8, 0x43, 0x1f, 0xc2, 0xd3, 0x53, 0xd8, 0x16, 0xb8, 0xce, 0x2c, 0x0b, + 0x71, 0x4f, 0xc5, 0x21, 0xce, 0x77, 0x1d, 0x06, 0x2b, 0xfb, 0x1d, 0xbf, 0xba, 0x5d, 0x76, 0xbc, + 0x8c, 0x72, 0x9d, 0xb9, 0xcb, 0x7f, 0x1d, 0x2a, 0x2e, 0xa1, 0xba, 0x69, 0x69, 0xb1, 0x03, 0x7c, + 0x59, 0x08, 0x65, 0x59, 0xe0, 0x10, 0x9e, 0x9a, 0xcb, 0x79, 0xd0, 0xff, 0x43, 0x31, 0xa4, 0x4b, + 0xe9, 0x84, 0xb3, 0x70, 0x70, 0xbe, 0x0b, 0x75, 0xd5, 0xdf, 0xa7, 0x43, 0x97, 0xf1, 0x13, 0x63, + 0x1b, 0x56, 0x5c, 0xe2, 0x4d, 0x46, 0xe2, 0x0c, 0x57, 0xdd, 0x7e, 0x6d, 0x39, 0xb6, 0xc4, 0xa4, + 0x93, 0x11, 0xc5, 0xd2, 0x58, 0x7d, 0x00, 0x2b, 0x42, 0x82, 0x4a, 0xb0, 0x7a, 0x6f, 0xff, 0xee, + 0xfe, 0xc1, 0x07, 0xfb, 0xf5, 0x14, 0x02, 0x58, 0xd9, 0x69, 0xb5, 0xda, 0x87, 0xdd, 0x7a, 0x1a, + 0x15, 0x21, 0xbf, 0xd3, 0x3c, 0xc0, 0xdd, 0x7a, 0x86, 0x89, 0x71, 0xfb, 0xfd, 0x76, 0xab, 0x5b, + 0xcf, 0xa2, 0x35, 0xa8, 0x88, 0xb6, 0x76, 0xe7, 0x00, 0xff, 0x68, 0xa7, 0x5b, 0xcf, 0x45, 0x44, + 0x47, 0xed, 0xfd, 0xdb, 0x6d, 0x5c, 0xcf, 0xab, 0xaf, 0xb3, 0x43, 0x62, 0x02, 0xbf, 0x0a, 0x8f, + 0x83, 0xe9, 0xc8, 0x71, 0x50, 0xfd, 0x75, 0x06, 0x94, 0x64, 0xd2, 0x84, 0xde, 0x9f, 0x9a, 0xf8, + 0xf6, 0x05, 0x18, 0xd7, 0xd4, 0xec, 0xd1, 0x0b, 0x50, 0x75, 0xc9, 0x31, 0xa1, 0xfd, 0xa1, 0x20, + 0x71, 0x22, 0x65, 0x56, 0x70, 0x45, 0x4a, 0xb9, 0x91, 0x27, 0xd4, 0x3e, 0x21, 0x7d, 0xaa, 0x09, + 0x2c, 0x12, 0x9b, 0xae, 0xc8, 0xd4, 0x98, 0xf4, 0x48, 0x08, 0xd5, 0x8f, 0x2f, 0x14, 0xcb, 0x22, + 0xe4, 0x71, 0xbb, 0x8b, 0x3f, 0xac, 0x67, 0x11, 0x82, 0x2a, 0x6f, 0x6a, 0x47, 0xfb, 0x3b, 0x87, + 0x47, 0x9d, 0x03, 0x16, 0xcb, 0x4b, 0x50, 0xf3, 0x63, 0xe9, 0x0b, 0xf3, 0xea, 0x21, 0x3c, 0x9d, + 0xc0, 0xf8, 0xbe, 0xe5, 0x91, 0x58, 0xfd, 0x4d, 0x3a, 0xea, 0x32, 0x7e, 0xd8, 0x7d, 0x6f, 0x2a, + 0xd2, 0x5b, 0xcb, 0xf2, 0xc4, 0xe9, 0x30, 0x2b, 0x50, 0x20, 0xb2, 0xd0, 0x23, 0x8f, 0xc0, 0x41, + 0x5f, 0x7d, 0x6d, 0x71, 0xd0, 0xc2, 0x5d, 0x97, 0x51, 0x3f, 0x82, 0x6a, 0xbc, 0x10, 0xc5, 0x36, + 0x91, 0x6b, 0x4f, 0x2c, 0x83, 0x0f, 
0x32, 0x8f, 0x45, 0x07, 0xdd, 0x82, 0xfc, 0x89, 0x2d, 0x80, + 0x66, 0xfe, 0xd7, 0x76, 0xdf, 0xa6, 0x24, 0x52, 0xc8, 0x12, 0xda, 0xea, 0x63, 0xc8, 0x73, 0xdc, + 0x60, 0x18, 0xc0, 0x4b, 0x4a, 0x92, 0x56, 0xb2, 0x36, 0xfa, 0x08, 0x40, 0xa7, 0xd4, 0x35, 0x7b, + 0x93, 0xd0, 0xf1, 0xc6, 0x7c, 0xdc, 0xd9, 0xf1, 0xf5, 0x9a, 0x57, 0x25, 0x00, 0x5d, 0x0e, 0x4d, + 0x23, 0x20, 0x14, 0x71, 0xa8, 0xee, 0x43, 0x35, 0x6e, 0xeb, 0x13, 0x21, 0x31, 0x86, 0x38, 0x11, + 0x12, 0xbc, 0x56, 0x12, 0xa1, 0x80, 0x46, 0x65, 0x45, 0xf9, 0x90, 0x77, 0xd4, 0xdf, 0xa6, 0xa1, + 0xd0, 0x3d, 0x95, 0xc1, 0x4d, 0xa8, 0x5c, 0x85, 0xa6, 0x99, 0x68, 0x9d, 0x46, 0x94, 0xc2, 0xb2, + 0x41, 0x81, 0xed, 0xdd, 0x60, 0x27, 0xe4, 0x96, 0x3d, 0xb1, 0xfa, 0x05, 0x10, 0xb9, 0x05, 0xae, + 0x43, 0xc5, 0x76, 0xcd, 0x81, 0x69, 0xe9, 0xa3, 0x28, 0x4b, 0x2e, 0xfb, 0x42, 0x4e, 0x2d, 0xdf, + 0x81, 0x62, 0x90, 0x5a, 0x18, 0x89, 0xd7, 0x0d, 0xc3, 0x25, 0x9e, 0x27, 0xe1, 0xc1, 0xef, 0xf2, + 0x6a, 0xa9, 0xfd, 0x48, 0x96, 0x8b, 0xb2, 0x58, 0x74, 0x54, 0x03, 0x6a, 0x53, 0x79, 0x09, 0xbd, + 0x03, 0xab, 0xce, 0xa4, 0xa7, 0xf9, 0x31, 0x9c, 0xba, 0xc9, 0xf3, 0xe9, 0xe1, 0xa4, 0x37, 0x32, + 0xfb, 0x77, 0xc9, 0x99, 0x3f, 0x62, 0x67, 0xd2, 0xbb, 0x2b, 0x42, 0x2d, 0xde, 0x92, 0x89, 0xbe, + 0xe5, 0x04, 0x0a, 0xfe, 0xce, 0x41, 0x3f, 0x80, 0x62, 0x90, 0xf2, 0x82, 0x22, 0x7a, 0x62, 0xae, + 0x94, 0xee, 0x43, 0x13, 0x76, 0xd6, 0xf0, 0xcc, 0x81, 0x45, 0x0c, 0x2d, 0x3c, 0x46, 0xf0, 0xb7, + 0x15, 0x70, 0x4d, 0x3c, 0xd8, 0xf3, 0xcf, 0x10, 0xea, 0xbf, 0xd3, 0x50, 0xf0, 0x8b, 0xa5, 0xe8, + 0xf5, 0xc8, 0xe6, 0xac, 0xce, 0xa9, 0xbe, 0xf8, 0x8a, 0x61, 0xc1, 0x33, 0x3e, 0xd6, 0xcc, 0xc5, + 0xc7, 0x9a, 0x54, 0xb9, 0xf6, 0xef, 0x10, 0x72, 0x17, 0xbe, 0x43, 0x78, 0x15, 0x10, 0xb5, 0xa9, + 0x3e, 0xd2, 0x4e, 0x6c, 0x6a, 0x5a, 0x03, 0x4d, 0x04, 0x5b, 0x50, 0xa6, 0x3a, 0x7f, 0x72, 0x9f, + 0x3f, 0x38, 0xe4, 0x71, 0xff, 0x79, 0x1a, 0x0a, 0x41, 0xee, 0xbb, 0x68, 0xfd, 0xf2, 0x0a, 0xac, + 0x48, 0x78, 0x17, 0x05, 0x4c, 0xd9, 0x0b, 0x4a, 0xe9, 0xb9, 0x48, 0x29, 
0x5d, 0x81, 0xc2, 0x98, + 0x50, 0x9d, 0xa3, 0xa8, 0xd8, 0xa3, 0x41, 0xff, 0xe6, 0xdb, 0x50, 0x8a, 0x94, 0x92, 0xd9, 0xe7, + 0xb9, 0xdf, 0xfe, 0xa0, 0x9e, 0x52, 0x56, 0x3f, 0xfb, 0xe2, 0x5a, 0x76, 0x9f, 0x3c, 0x62, 0x7b, + 0x16, 0xb7, 0x5b, 0x9d, 0x76, 0xeb, 0x6e, 0x3d, 0xad, 0x94, 0x3e, 0xfb, 0xe2, 0xda, 0x2a, 0x26, + 0xbc, 0xf2, 0x73, 0xb3, 0x03, 0xe5, 0xe8, 0xaa, 0xc4, 0xc1, 0x0e, 0x41, 0xf5, 0xf6, 0xbd, 0xc3, + 0xbd, 0xdd, 0xd6, 0x4e, 0xb7, 0xad, 0xdd, 0x3f, 0xe8, 0xb6, 0xeb, 0x69, 0xf4, 0x34, 0x5c, 0xda, + 0xdb, 0x7d, 0xaf, 0xd3, 0xd5, 0x5a, 0x7b, 0xbb, 0xed, 0xfd, 0xae, 0xb6, 0xd3, 0xed, 0xee, 0xb4, + 0xee, 0xd6, 0x33, 0xdb, 0xff, 0x02, 0xa8, 0xed, 0x34, 0x5b, 0xbb, 0x2c, 0xbb, 0x99, 0x7d, 0x9d, + 0x1f, 0xb3, 0x5b, 0x90, 0xe3, 0x07, 0xe9, 0x73, 0x2f, 0xc5, 0x95, 0xf3, 0x6b, 0x82, 0xe8, 0x0e, + 0xe4, 0xf9, 0x19, 0x1b, 0x9d, 0x7f, 0x4b, 0xae, 0x2c, 0x28, 0x12, 0xb2, 0xc1, 0xf0, 0xcf, 0xe3, + 0xdc, 0x6b, 0x73, 0xe5, 0xfc, 0x9a, 0x21, 0xc2, 0x50, 0x0c, 0x39, 0xfa, 0xe2, 0x6b, 0x64, 0x65, + 0x09, 0x44, 0x42, 0x7b, 0xb0, 0xea, 0x1f, 0xab, 0x16, 0x5d, 0x6c, 0x2b, 0x0b, 0x8b, 0x7a, 0x2c, + 0x5c, 0xe2, 0xf8, 0x7b, 0xfe, 0x2d, 0xbd, 0xb2, 0xa0, 0x42, 0x89, 0x76, 0x61, 0x45, 0xf2, 0xce, + 0x05, 0x97, 0xd5, 0xca, 0xa2, 0x22, 0x1d, 0x0b, 0x5a, 0x58, 0x58, 0x58, 0xfc, 0xef, 0x81, 0xb2, + 0x44, 0xf1, 0x15, 0xdd, 0x03, 0x88, 0x1c, 0x76, 0x97, 0xf8, 0xa9, 0x40, 0x59, 0xa6, 0xa8, 0x8a, + 0x0e, 0xa0, 0x10, 0x9c, 0x3d, 0x16, 0x5e, 0xf1, 0x2b, 0x8b, 0xab, 0x9b, 0xe8, 0x01, 0x54, 0xe2, + 0x9c, 0x7b, 0xb9, 0x8b, 0x7b, 0x65, 0xc9, 0xb2, 0x25, 0xf3, 0x1f, 0x27, 0xe0, 0xcb, 0x5d, 0xe4, + 0x2b, 0x4b, 0x56, 0x31, 0xd1, 0x27, 0xb0, 0x36, 0x4b, 0x90, 0x97, 0xbf, 0xd7, 0x57, 0x2e, 0x50, + 0xd7, 0x44, 0x63, 0x40, 0x73, 0x88, 0xf5, 0x05, 0xae, 0xf9, 0x95, 0x8b, 0x94, 0x39, 0x91, 0x01, + 0xb5, 0x69, 0xb6, 0xba, 0xec, 0xb5, 0xbf, 0xb2, 0x74, 0xc9, 0x53, 0xbc, 0x25, 0x4e, 0x60, 0x97, + 0xfd, 0x0d, 0x40, 0x59, 0xba, 0x02, 0xda, 0x6c, 0x7f, 0xf9, 0xcd, 0x7a, 0xfa, 0xab, 0x6f, 0xd6, + 0xd3, 0x7f, 
0xfb, 0x66, 0x3d, 0xfd, 0xf9, 0x93, 0xf5, 0xd4, 0x57, 0x4f, 0xd6, 0x53, 0x7f, 0x7e, + 0xb2, 0x9e, 0xfa, 0xc9, 0x2b, 0x03, 0x93, 0x0e, 0x27, 0xbd, 0xcd, 0xbe, 0x3d, 0xde, 0x8a, 0xfe, + 0x0b, 0x35, 0xef, 0xff, 0xac, 0xde, 0x0a, 0x4f, 0x90, 0x6f, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, + 0x23, 0x35, 0x8c, 0x56, 0xbf, 0x25, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -3158,6 +3469,8 @@ type ABCIApplicationClient interface { OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) + PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) + ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) } type aBCIApplicationClient struct { @@ -3294,6 +3607,24 @@ func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *Requ return out, nil } +func (c *aBCIApplicationClient) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) { + out := new(ResponsePrepareProposal) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/PrepareProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) { + out := new(ResponseProcessProposal) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ProcessProposal", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // ABCIApplicationServer is the server API for ABCIApplication service. type ABCIApplicationServer interface { Echo(context.Context, *RequestEcho) (*ResponseEcho, error) @@ -3310,6 +3641,8 @@ type ABCIApplicationServer interface { OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) + PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) + ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) } // UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. @@ -3358,6 +3691,12 @@ func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") } +func (*UnimplementedABCIApplicationServer) PrepareProposal(ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") +} +func (*UnimplementedABCIApplicationServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") +} func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { s.RegisterService(&_ABCIApplication_serviceDesc, srv) @@ -3615,6 +3954,42 @@ func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Co return interceptor(ctx, in, info, handler) } +func 
_ABCIApplication_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPrepareProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/PrepareProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, req.(*RequestPrepareProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestProcessProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ProcessProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, req.(*RequestProcessProposal)) + } + return interceptor(ctx, in, info, handler) +} + var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.abci.ABCIApplication", HandlerType: (*ABCIApplicationServer)(nil), @@ -3675,6 +4050,14 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "ApplySnapshotChunk", Handler: _ABCIApplication_ApplySnapshotChunk_Handler, }, + { + MethodName: "PrepareProposal", + Handler: _ABCIApplication_PrepareProposal_Handler, + }, + { + MethodName: "ProcessProposal", + Handler: _ABCIApplication_ProcessProposal_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "tendermint/abci/types.proto", @@ -4006,7 +4389,51 @@ 
func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } return len(dAtA) - i, nil } -func (m *RequestEcho) Marshal() (dAtA []byte, err error) { +func (m *Request_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Request_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *RequestEcho) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4171,12 +4598,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err16 != nil { - return 0, err16 + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err18 != nil { + return 0, err18 } - i -= n16 - i = encodeVarintTypes(dAtA, i, uint64(n16)) + i -= n18 + i = encodeVarintTypes(dAtA, i, uint64(n18)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -4559,6 +4986,88 @@ 
func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } +func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockDataSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockDataSize)) + i-- + dAtA[i] = 0x10 + } + if m.BlockData != nil { + { + size, err := m.BlockData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Response) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) @@ -4906,6 +5415,52 @@ func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, er } return len(dAtA) - i, nil } +func (m *Response_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Response_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} func (m *ResponseException) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5623,20 +6178,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA39 := make([]byte, len(m.RefetchChunks)*10) - var j38 int + dAtA45 := make([]byte, len(m.RefetchChunks)*10) + var j44 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA39[j38] = uint8(uint64(num)&0x7f | 0x80) + dAtA45[j44] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j38++ + j44++ } - dAtA39[j38] = uint8(num) - j38++ + dAtA45[j44] = uint8(num) + j44++ } - i -= j38 - copy(dAtA[i:], dAtA39[:j38]) - i = encodeVarintTypes(dAtA, i, uint64(j38)) + i -= j44 + copy(dAtA[i:], dAtA45[:j44]) + i = encodeVarintTypes(dAtA, i, 
uint64(j44)) i-- dAtA[i] = 0x12 } @@ -5648,6 +6203,78 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } +func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockData != nil { + { + size, err := m.BlockData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Evidence) > 0 { + for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Evidence[iNdEx]) + copy(dAtA[i:], m.Evidence[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Evidence[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5801,6 +6428,13 @@ func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
_ = i var l int _ = l + if len(m.OriginalHash) > 0 { + i -= len(m.OriginalHash) + copy(dAtA[i:], m.OriginalHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.OriginalHash))) + i-- + dAtA[i] = 0x2a + } { size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -5972,12 +6606,12 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n43, err43 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err43 != nil { - return 0, err43 + n50, err50 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err50 != nil { + return 0, err50 } - i -= n43 - i = encodeVarintTypes(dAtA, i, uint64(n43)) + i -= n50 + i = encodeVarintTypes(dAtA, i, uint64(n50)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -6246,6 +6880,30 @@ func (m *Request_ApplySnapshotChunk) Size() (n int) { } return n } +func (m *Request_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *RequestEcho) Size() (n int) { if m == nil { return 0 @@ -6483,6 +7141,39 @@ func (m *RequestApplySnapshotChunk) Size() (n int) { return n } +func (m *RequestPrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockData != nil { + l = m.BlockData.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.BlockDataSize != 0 { + n += 1 + sovTypes(uint64(m.BlockDataSize)) + } + return n +} + +func (m *RequestProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Header.Size() + n += 1 + l + 
sovTypes(uint64(l)) + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *Response) Size() (n int) { if m == nil { return 0 @@ -6675,6 +7366,30 @@ func (m *Response_ApplySnapshotChunk) Size() (n int) { } return n } +func (m *Response_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *ResponseException) Size() (n int) { if m == nil { return 0 @@ -7014,7 +7729,38 @@ func (m *ResponseApplySnapshotChunk) Size() (n int) { return n } -func (m *LastCommitInfo) Size() (n int) { +func (m *ResponsePrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockData != nil { + l = m.BlockData.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + if len(m.Evidence) > 0 { + for _, b := range m.Evidence { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *LastCommitInfo) Size() (n int) { if m == nil { return 0 } @@ -7089,6 +7835,10 @@ func (m *TxResult) Size() (n int) { } l = m.Result.Size() n += 1 + l + sovTypes(uint64(l)) + l = len(m.OriginalHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -7709,6 +8459,76 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_ApplySnapshotChunk{v} iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestPrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_PrepareProposal{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ProcessProposal{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -9309,6 +10129,226 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } +func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
RequestPrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockData == nil { + m.BlockData = &types1.Data{} + } + if err := m.BlockData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockDataSize", wireType) + } + m.BlockDataSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockDataSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestProcessProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Response) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -9612,15 +10652,85 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &ResponseCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_CheckTx{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_DeliverTx{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseEndBlock{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_CheckTx{v} + m.Value = &Response_EndBlock{v} iNdEx = postIndex - case 9: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9647,15 +10757,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseCommit{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_DeliverTx{v} + m.Value = &Response_Commit{v} iNdEx = postIndex - case 10: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9682,15 +10792,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_ListSnapshots{v} iNdEx = postIndex - case 11: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9717,15 +10827,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCommit{} + v := &ResponseOfferSnapshot{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Commit{v} + m.Value = &Response_OfferSnapshot{v} iNdEx = postIndex - case 12: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9752,15 +10862,15 @@ func (m 
*Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseListSnapshots{} + v := &ResponseLoadSnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_ListSnapshots{v} + m.Value = &Response_LoadSnapshotChunk{v} iNdEx = postIndex - case 13: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9787,15 +10897,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseOfferSnapshot{} + v := &ResponseApplySnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_OfferSnapshot{v} + m.Value = &Response_ApplySnapshotChunk{v} iNdEx = postIndex - case 14: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9822,15 +10932,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseLoadSnapshotChunk{} + v := &ResponsePrepareProposal{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_LoadSnapshotChunk{v} + m.Value = &Response_PrepareProposal{v} iNdEx = postIndex - case 15: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9857,11 +10967,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := 
&ResponseApplySnapshotChunk{} + v := &ResponseProcessProposal{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_ApplySnapshotChunk{v} + m.Value = &Response_ProcessProposal{v} iNdEx = postIndex default: iNdEx = preIndex @@ -12125,6 +13235,193 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockData == nil { + m.BlockData = &types1.Data{} + } + if err := m.BlockData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseProcessProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseProcessProposal_Result(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evidence = append(m.Evidence, make([]byte, postIndex-iNdEx)) + copy(m.Evidence[len(m.Evidence)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -12612,6 +13909,40 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalHash = append(m.OriginalHash[:0], dAtA[iNdEx:postIndex]...) + if m.OriginalHash == nil { + m.OriginalHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/docs/architecture/README.md b/docs/architecture/README.md new file mode 100644 index 0000000000..e6c9c0a06e --- /dev/null +++ b/docs/architecture/README.md @@ -0,0 +1,113 @@ +--- +order: 1 +parent: + order: false +--- + +# Architecture Decision Records (ADR) + +This is a location to record all high-level architecture decisions in the tendermint project. + +You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t). 
+ +An ADR should provide: + +- Context on the relevant goals and the current state +- Proposed changes to achieve the goals +- Summary of pros and cons +- References +- Changelog + +Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and +justification for a change in architecture, or for the architecture of something +new. The spec is much more compressed and streamlined summary of everything as +it stands today. + +If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. + +Note the context/background should be written in the present tense. + +## Table of Contents + +### Implemented + +- [ADR-001: Logging](./adr-001-logging.md) +- [ADR-002: Event-Subscription](./adr-002-event-subscription.md) +- [ADR-003: ABCI-APP-RPC](./adr-003-abci-app-rpc.md) +- [ADR-004: Historical-Validators](./adr-004-historical-validators.md) +- [ADR-005: Consensus-Params](./adr-005-consensus-params.md) +- [ADR-008: Priv-Validator](./adr-008-priv-validator.md) +- [ADR-009: ABCI-Design](./adr-009-ABCI-design.md) +- [ADR-010: Crypto-Changes](./adr-010-crypto-changes.md) +- [ADR-011: Monitoring](./adr-011-monitoring.md) +- [ADR-014: Secp-Malleability](./adr-014-secp-malleability.md) +- [ADR-015: Crypto-Encoding](./adr-015-crypto-encoding.md) +- [ADR-016: Protocol-Versions](./adr-016-protocol-versions.md) +- [ADR-017: Chain-Versions](./adr-017-chain-versions.md) +- [ADR-018: ABCI-Validators](./adr-018-ABCI-Validators.md) +- [ADR-019: Multisigs](./adr-019-multisigs.md) +- [ADR-020: Block-Size](./adr-020-block-size.md) +- [ADR-021: ABCI-Events](./adr-021-abci-events.md) +- [ADR-025: Commit](./adr-025-commit.md) +- [ADR-026: General-Merkle-Proof](./adr-026-general-merkle-proof.md) +- [ADR-033: Pubsub](./adr-033-pubsub.md) +- [ADR-034: Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md) +- [ADR-043: 
Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md) +- [ADR-044: Lite-Client-With-Weak-Subjectivity](./adr-044-lite-client-with-weak-subjectivity.md) +- [ADR-046: Light-Client-Implementation](./adr-046-light-client-implementation.md) +- [ADR-047: Handling-Evidence-From-Light-Client](./adr-047-handling-evidence-from-light-client.md) +- [ADR-051: Double-Signing-Risk-Reduction](./adr-051-double-signing-risk-reduction.md) +- [ADR-052: Tendermint-Mode](./adr-052-tendermint-mode.md) +- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md) +- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md) +- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md) +- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md) +- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md) +- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md) +- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md) +- [ADR-064: Batch-Verification](./adr-064-batch-verification.md) +- [ADR-066-E2E-Testing](./adr-066-e2e-testing.md) + +### Accepted + +- [ADR-006: Trust-Metric](./adr-006-trust-metric.md) +- [ADR-024: Sign-Bytes](./adr-024-sign-bytes.md) +- [ADR-035: Documentation](./adr-035-documentation.md) +- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md) +- [ADR-060: Go-API-Stability](./adr-060-go-api-stability.md) +- [ADR-061: P2P-Refactor-Scope](./adr-061-p2p-refactor-scope.md) +- [ADR-065: Custom Event Indexing](./adr-065-custom-event-indexing.md) +- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md) +- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md) + +### Rejected + +- [ADR-023: ABCI-Propose-tx](./adr-023-ABCI-propose-tx.md) +- [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md) +- [ADR-058: Event-Hashing](./adr-058-event-hashing.md) + + +### Proposed + +- [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md) +- [ADR-012: Peer-Transport](./adr-012-peer-transport.md) +- 
[ADR-013: Symmetric-Crypto](./adr-013-symmetric-crypto.md) +- [ADR-022: ABCI-Errors](./adr-022-abci-errors.md) +- [ADR-030: Consensus-Refactor](./adr-030-consensus-refactor.md) +- [ADR-037: Deliver-Block](./adr-037-deliver-block.md) +- [ADR-038: Non-Zero-Start-Height](./adr-038-non-zero-start-height.md) +- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) +- [ADR-042: State-Sync](./adr-042-state-sync.md) +- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md) +- [ADR-050: Improved-Trusted-Peering](./adr-050-improved-trusted-peering.md) +- [ADR 056: Proving amnesia attacks](./adr-056-proving-amnesia-attacks.md) +- [ADR-057: RPC](./adr-057-RPC.md) +- [ADR-069: Flexible-Node-Initialization](./adr-069-flexible-node-intitalization.md) +- [ADR-071: Proposer-Based-Timestamps](./adr-071-proposer-based-timestamps.md) +- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md) + +### Draft +- [ADR 028: LibP2P Integration](./adr-028-libp2p.md) +- [ADR 031: Changelog Structure](./adr-031-changelog.md) +- [ADR-036: Empty-Blocks-ABCI](./adr-036-empty-blocks-abci.md) +- [ADR-040: Blockchain-Reactor-Refactor](./adr-040-blockchain-reactor-refactor.md) diff --git a/docs/celestia-architecture/README.md b/docs/celestia-architecture/README.md new file mode 100644 index 0000000000..2bae6abd8b --- /dev/null +++ b/docs/celestia-architecture/README.md @@ -0,0 +1,62 @@ +--- +order: 1 +parent: + order: false +--- + +# Tendermint and Celestia + +celestia-core is not meant to be used as a general purpose framework. +Instead, its main purpose is to provide certain components (mainly consensus but also a p2p layer for Tx gossiping) for the Celestia main chain. +Hence, we do not provide any extensive documentation here. 
+ +Instead of keeping a copy of the Tendermint documentation, we refer to the existing extensive and maintained documentation and specification: + +- https://docs.tendermint.com/ +- https://github.com/tendermint/tendermint/tree/master/docs/ +- https://github.com/tendermint/spec + +Reading these will give you a lot of background and context on Tendermint which will also help you understand how celestia-core and [celestia-app](https://github.com/celestiaorg/celestia-app) interact with each other. + +## Celestia + +As mentioned above, celestia-core aims to be more focused on the Celestia use-case than vanilla Tendermint. +Moving forward we might provide a clear overview on the changes we incorporated. +For now, we refer to the Celestia specific [ADRs](./adr) in this repository as well as to the Celestia specification: + +- [celestia-specs](https://github.com/celestiaorg/celestia-specs)**** + +## Architecture Decision Records (ADR) + +This is a location to record all high-level architecture decisions in this repository. + +You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t). + +An ADR should provide: + +- Context on the relevant goals and the current state +- Proposed changes to achieve the goals +- Summary of pros and cons +- References +- Changelog + +Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and +justification for a change in architecture, or for the architecture of something +new. The spec is much more compressed and streamlined summary of everything as +it stands today. + +If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. + +Note the context/background should be written in the present tense. 
+ +To start a new ADR, you can use this template: [adr-template.md](./adr-template.md) + +### Table of Contents + +- [ADR 001: Erasure Coding Block Propagation](./adr-001-block-propagation.md) +- [ADR 002: Sampling erasure coded Block chunks](./adr-002-ipld-da-sampling.md) +- [ADR 003: Retrieving Application messages](./adr-003-application-data-retrieval.md) +- [ADR 004: Data Availability Sampling Light Client](./adr-004-mvp-light-client.md) +- [ADR 005: Decouple BlockID and PartSetHeader](./adr-005-decouple-blockid-and-partsetheader) +- [ADR 006: Row Propagation](./adr-006-row-propagation) +- [ADR 007: Minimal Changes to Tendermint](./adr-007-minimal-changes-to-tendermint) \ No newline at end of file diff --git a/docs/celestia-architecture/adr-001-block-propagation.md b/docs/celestia-architecture/adr-001-block-propagation.md new file mode 100644 index 0000000000..6f015be391 --- /dev/null +++ b/docs/celestia-architecture/adr-001-block-propagation.md @@ -0,0 +1,124 @@ +# ADR 001: Erasure Coding Block Propagation + +## Changelog + +- 16-2-2021: Created + +## Context + +Block propagation is currently done by splitting the block into arbitrary chunks and gossiping them to validators via a gossip routine. While this does not have downsides it does not meet the needs of the Celestia chain. The celestia chain requires blocks to be encoded in a different way and for the proposer to not propagate the chunks to peers. + +Celestia wants validators to pull the block from a IPFS network. What does this mean? As I touched on earlier the proposer pushes the block to the network, this in turn means that each validator downloads and reconstructs the block each time to verify it. Instead Celestia will encode and split up the block via erasure codes, stored locally in the nodes IPFS daemon. After the proposer has sent the block to IPFS and received the CIDs it will include them into the proposal. 
This proposal will be gossiped to other validators, once a validator receives the proposal it will begin requesting the CIDs included in the proposal. + +There are two forms of a validator, one that downloads the block and one that samples it. What does sampling mean? Sampling is the act of checking that a portion or entire block is available for download. + +## Detailed Design + +The proposed design is as follows. + +### Types + +The proposal and vote types have a BlockID, this will be replaced with a header hash. The proposal will contain add fields. + +The current proposal will be updated to include required fields. The entirety of the message will be reworked at a later date. To see the extent of the needed changes you can visit the [spec repo](https://github.com/celestiaorg/celestia-specs/blob/master/specs/proto/consensus.proto#L19) + +```proto +message Proposal { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + int32 pol_round = 4; + + +++ + // 32-byte hash + bytes last_header_hash = 5; + // 32-byte hash + bytes last_commit_hash = 6; + // 32-byte hash + bytes consensus_root = 7; + FeeHeader fee_header = 8; + // 32-byte hash + bytes state_commitment = 9; + uint64 available_data_original_shares_used = 10; + AvailableDataHeader available_data_header = 11; + +++ + + google.protobuf.Timestamp timestamp = 12 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 12; +} +``` + +```proto +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. +message Vote { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + +++ + bytes header_hash = 4; + +++ + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes validator_address = 6; + int32 validator_index = 7; + bytes signature = 8; +} +``` + +See [specs](https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#vote) for more details on the vote. 
+ +### Disk Storage + +Currently celestia-core stores all blocks in its store. Going forward only the headers of the blocks within the unbonding period will be stored. This will drastically reduce the amount of storage required by a celestia-core node. After the unbonding period all headers will have the option of being pruned. + +Proposed amendment to `BlockStore` interface + +```go +type BlockStore interface { + Base() int64 + Height() int64 + Size() int64 + + LoadBlockMeta(height int64) *types.BlockMeta + LoadHeader(height int64) *types.Header + LoadDAHeader(height int64) *types.DataAvailabilityHeader + + SaveHeaders(header *types.Header, daHeader *types.DataAvailabilityHeader, seenCommit *types.Commit) + + PruneHeaders(height int64) (uint64, error) + + LoadBlockCommit(height int64) *types.Commit + LoadSeenCommit(height int64) *types.Commit +} +``` + +Along side these changes the rpc layer will need to change. Instead of querying the LL-core store, the node will redirect the query through IPFS. + +Example: + +When a user requests a block from the LL node, the request will be set to the IPLD plugin. If the IPLD does not have the requested block, it will make a request to the celestia IPFS network for the required CIDs. If the full node does not have the DAheader they will not be able to request the block data. + +![user request flow](./assets/user-request.png) + +The goal is to not change the public interface for RPC's. It is yet to be seen if this possible. This means that CIDs will need to be set and loaded from the store in order to get all the related block information an user requires. + +## Status + +Proposed + + +### Positive + +- Minimal breakage to public interface +- Only store the block in a single place (IPFS) +- Reduce the public interface of the storage within Celestia. 
+ +### Negative + +- User requests may take more time to process + +### Neutral + +## References diff --git a/docs/celestia-architecture/adr-002-ipld-da-sampling.md b/docs/celestia-architecture/adr-002-ipld-da-sampling.md new file mode 100644 index 0000000000..10a642f619 --- /dev/null +++ b/docs/celestia-architecture/adr-002-ipld-da-sampling.md @@ -0,0 +1,280 @@ +# ADR 002: Sampling erasure coded Block chunks + +## Changelog + +- 26-2-2021: Created + +## Context + +In Tendermint's block gossiping each peer gossips random parts of block data to peers. +For Celestia, we need nodes (from light-clients to validators) to be able to sample row-/column-chunks of the erasure coded +block (aka the extended data square) from the network. +This is necessary for Data Availability proofs. + +![extended_square.png](img/extended_square.png) + +A high-level, implementation-independent formalization of above mentioned sampling and Data Availability proofs can be found in: +[_Fraud and Data Availability Proofs: Detecting Invalid Blocks in Light Clients_](https://fc21.ifca.ai/papers/83.pdf). + +For the time being, besides the academic paper, no other formalization or specification of the protocol exists. +Currently, the Celestia specification itself only describes the [erasure coding](https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#erasure-coding) +and how to construct the extended data square from the block data. + +This ADR: +- describes the high-level requirements +- defines the API that and how it can be used by different components of Celestia (block gossiping, block sync, DA proofs) +- documents decision on how to implement this. + + +The core data structures and the erasure coding of the block are already implemented in celestia-core ([#17], [#19], [#83]). +While there are no ADRs for these changes, we can refer to the Celestia specification in this case. 
+For this aspect, the existing implementation and specification should already be on par for the most part. +The exact arrangement of the data as described in this [rationale document](https://github.com/celestiaorg/celestia-specs/blob/master/rationale/message_block_layout.md) +in the specification can happen at app-side of the ABCI boundary. +The latter was implemented in [celestiaorg/celestia-app#21](https://github.com/celestiaorg/celestia-app/pull/21) +leveraging a new ABCI method, added in [#110](https://github.com/celestiaorg/celestia-core/pull/110). +This new method is a sub-set of the proposed ABCI changes aka [ABCI++](https://github.com/tendermint/spec/pull/254). + +Mustafa Al-Bassam (@musalbas) implemented a [prototype](https://github.com/celestiaorg/celestia-prototype) +whose main purpose is to realistically analyse the protocol. +Although the prototype does not make any network requests and only operates locally, it can partly serve as a reference implementation. +It uses the [rsmt2d] library. + +The implementation will essentially use IPFS' APIs. For reading (and writing) chunks it +will use the IPLD [`DagService`](https://github.com/ipfs/go-ipld-format/blob/d2e09424ddee0d7e696d01143318d32d0fb1ae63/merkledag.go#L54), +more precisely the [`NodeGetter`](https://github.com/ipfs/go-ipld-format/blob/d2e09424ddee0d7e696d01143318d32d0fb1ae63/merkledag.go#L18-L27) +and [`NodeAdder`](https://github.com/ipfs/go-ipld-format/blob/d2e09424ddee0d7e696d01143318d32d0fb1ae63/merkledag.go#L29-L39). +As an optimization, we can also use a [`Batch`](https://github.com/ipfs/go-ipld-format/blob/d2e09424ddee0d7e696d01143318d32d0fb1ae63/batch.go#L29) +to batch adding and removing nodes. +This will be achieved by passing around a [CoreAPI](https://github.com/ipfs/interface-go-ipfs-core/blob/b935dfe5375eac7ea3c65b14b3f9a0242861d0b3/coreapi.go#L15) +object, which derive from the IPFS node which is created along a with a tendermint node (see [#152]). 
+This code snippet does exactly that (see the [go-ipfs documentation] for more examples): +```go +// This constructs an IPFS node instance +node, _ := core.NewNode(ctx, nodeOptions) +// This attaches the Core API to the constructed node +coreApi := coreapi.NewCoreAPI(node) +``` + +The above mentioned IPLD methods operate on so called [ipld.Nodes]. +When computing the data root, we can pass in a [`NodeVisitor`](https://github.com/celestia/nmt/blob/b22170d6f23796a186c07e87e4ef9856282ffd1a/nmt.go#L22) +into the Namespaced Merkle Tree library to create these (each inner- and leaf-node in the tree becomes an ipld node). +As a peer that requests such an IPLD node, the Celestia IPLD plugin provides the [function](https://github.com/celestiaorg/celestia-core/blob/ceb881a177b6a4a7e456c7c4ab1dd0eb2b263066/p2p/ipld/plugin/nodes/nodes.go#L175) +`NmtNodeParser` to transform the retrieved raw data back into an `ipld.Node`. + +A more high-level description on the changes required to rip out the current block gossiping routine, +including changes to block storage-, RPC-layer, and potential changes to reactors is either handled in [ADR 001](./adr-001-block-propagation.md), +and/or in a few smaller, separate followup ADRs. + +## Alternative Approaches + +Instead of creating a full IPFS node object and passing it around as explained above + - use API (http) + - use ipld-light + - use alternative client + +Also, for better performance + - use [graph-sync], [IPLD selectors], e.g. via [ipld-prime] + +Also, there is the idea, that nodes only receive the [Header] with the data root only +and, in an additional step/request, download the DA header using the library, too. +While this feature is not considered here, and we assume each node that uses this library has the DA header, this assumption +is likely to change when flesh out other parts of the system in more detail. 
+Note that this also means that light clients would still need to validate that the data root and merkelizing the DA header yield the same result. + +## Decision + +> This section records the decision that was made. +> It is best to record as much info as possible from the discussion that happened. This aids in not having to go back to the Pull Request to get the needed information. + +> - TODO: briefly summarize github, discord, and slack discussions (?) +> - also mention Mustafa's prototype and compare both apis briefly (RequestSamples, RespondSamples, ProcessSamplesResponse) +> - mention [ipld experiments] + + + +## Detailed Design + +Add a package to the library that provides the following features: + 1. sample a given number of random row/col indices of extended data square given a DA header and indicate if successful or timeout/other error occurred + 2. store the block in the network by adding it to the peer's local Merkle-DAG whose content is discoverable via a DHT + 3. store the sampled chunks in the network + 4. reconstruct the whole block from a given DA header + 5. get all messages of a particular namespace ID. + +We mention 5. here mostly for completeness. Its details will be described / implemented in a separate ADR / PR. + +Apart from the above mentioned features, we informally collect additional requirements: +- where randomness is needed, the randomness source should be configurable +- all replies by the network should be verified if this is not sufficiently covered by the used libraries already (IPFS) +- where possible, the requests to the network should happen in parallel (without DoSing the proposer for instance). + +This library should be implemented as two new packages: + +First, a sub-package should be added to the layzledger-core [p2p] package +which does not know anything about the core data structures (Block, DA header etc). 
+It handles the actual network requests to the IPFS network and operates on IPFS/IPLD objects +directly and hence should live under [p2p/ipld]. +To a some extent this part of the stack already exists. + +Second, a high-level API that can "live" closer to the actual types, e.g., in a sub-package in [celestia-core/types] +or in a new sub-package `da`. + +We first describe the high-level library here and describe functions in +more detail inline with their godoc comments below. + +### API that operates on celestia-core types + +As mentioned above this part of the library has knowledge of the core types (and hence depends on them). +It does not deal with IPFS internals. + +```go +// ValidateAvailability implements the protocol described in https://fc21.ifca.ai/papers/83.pdf. +// Specifically all steps of the protocol described in section +// _5.2 Random Sampling and Network Block Recovery_ are carried out. +// +// In more detail it will first create numSamples random unique coordinates. +// Then, it will ask the network for the leaf data corresponding to these coordinates. +// Additionally to the number of requests, the caller can pass in a callback, +// which will be called on for each retrieved leaf with a verified Merkle proof. +// +// Among other use-cases, the callback can be useful to monitoring (progress), or, +// to process the leaf data the moment it was validated. +// The context can be used to provide a timeout. +// TODO: Should there be a constant = lower bound for #samples +func ValidateAvailability( + ctx contex.Context, + dah *DataAvailabilityHeader, + numSamples int, + onLeafValidity func(namespace.PrefixedData8), +) error { /* ... */} + +// RetrieveBlockData can be used to recover the block Data. +// It will carry out a similar protocol as described for ValidateAvailability. +// The key difference is that it will sample enough chunks until it can recover the +// full extended data square, including original data (e.g. 
by using rsmt2d.RepairExtendedDataSquare). +func RetrieveBlockData( + ctx contex.Context, + dah *DataAvailabilityHeader, + api coreiface.CoreAPI, + codec rsmt2d.Codec, + ) (types.Data, error) {/* ... */} + +// PutBlock operates directly on the Block. +// It first computes the erasure coding, aka the extended data square. +// Row by row ir calls a lower level library which handles adding the +// the row to the Merkle Dag, in our case a Namespaced Merkle Tree. +// Note, that this method could also fill the DA header. +// The data will be pinned by default. +func (b *Block) PutBlock(ctx contex.Context, nodeAdder ipld.NodeAdder) error +``` + +We now describe the lower-level library that will be used by above methods. +Again we provide more details inline in the godoc comments directly. + +`PutBlock` is a method on `Block` as the erasure coding can then be cached, e.g. in a private field +in the block. + +### Changes to the lower level API closer to IPFS (p2p/ipld) + +```go +// GetLeafData takes in a Namespaced Merkle tree root transformed into a Cid +// and the leaf index to retrieve. +// Callers also need to pass in the total number of leaves of that tree. +// Internally, this will be translated to a IPLD path and corresponds to +// an ipfs dag get request, e.g. namespacedCID/0/1/0/0/1. +// The retrieved data should be pinned by default. +func GetLeafData( + ctx context.Context, + rootCid cid.Cid, + leafIndex uint32, + totalLeafs uint32, // this corresponds to the extended square width + api coreiface.CoreAPI, +) ([]byte, error) +``` + +`GetLeafData` can be used by above `ValidateAvailability` and `RetrieveBlock` and +`PutLeaves` by `PutBlock`. + +### A Note on IPFS/IPLD + +In IPFS all data is _content addressed_ which basically means the data is identified by its hash. +Particularly, in the Celestia case, the root CID identifies the Namespaced Merkle tree including all its contents (inner and leaf nodes). 
+This means that if a `GetLeafData` request succeeds, the retrieved leaf data is in fact the leaf data in the tree. +We do not need to additionally verify Merkle proofs per leaf as this will essentially be done via IPFS on each layer while +resolving and getting to the leaf data. + +> TODO: validate this assumption and link to code that shows how this is done internally + +### Implementation plan + +As fully integrating Data Available proofs into tendermint, is a rather larger change we break up the work into the +following packages (not mentioning the implementation work that was already done): + +1. Flesh out the changes in the consensus messages ([celestia-specs#126], [celestia-specs#127]) +2. Flesh out the changes that would be necessary to replace the current block gossiping ([ADR 001](./adr-001-block-propagation.md)) +3. Add the possibility of storing and retrieving block data (samples or whole block) to celestia-core (this ADR and related PRs). +4. Integrate above API (3.) as an addition into celestia-core without directly replacing the tendermint counterparts (block gossip etc). +5. 
Rip out each component that will be redundant with above integration in one or even several smaller PRs: + - block gossiping (see ADR 001) + - modify block store (see ADR 001) + - make downloading full Blocks optional (flag/config) + - route some RPC requests to IPFS (see ADR 001) + + +## Status + +Proposed + +## Consequences + +### Positive + +- simplicity & ease of implementation +- can re-use an existing networking and p2p stack (go-ipfs) +- potential support of large, cool, and helpful community +- high-level API definitions independent of the used stack + +### Negative + +- latency +- being connected to the public IPFS network might be overkill if peers should in fact only care about a subset that participates in the Celestia protocol +- dependency on a large code-base with lots of features and options of which we only need a small subset of + +### Neutral +- two different p2p layers exist in celestia-core + +## References + +- https://github.com/celestiaorg/celestia-core/issues/85 +- https://github.com/celestiaorg/celestia-core/issues/167 + +- https://docs.ipld.io/#nodes +- https://arxiv.org/abs/1809.09044 +- https://fc21.ifca.ai/papers/83.pdf +- https://github.com/tendermint/spec/pull/254 + + +[#17]: https://github.com/celestiaorg/celestia-core/pull/17 +[#19]: https://github.com/celestiaorg/celestia-core/pull/19 +[#83]: https://github.com/celestiaorg/celestia-core/pull/83 + +[#152]: https://github.com/celestiaorg/celestia-core/pull/152 + +[celestia-specs#126]: https://github.com/celestiaorg/celestia-specs/issues/126 +[celestia-specs#127]: https://github.com/celestiaorg/celestia-specs/pulls/127 +[Header]: https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#header + +[go-ipfs documentation]: https://github.com/ipfs/go-ipfs/tree/master/docs/examples/go-ipfs-as-a-library#use-go-ipfs-as-a-library-to-spawn-a-node-and-add-a-file +[ipld experiments]: https://github.com/celestia/ipld-plugin-experiments +[ipld.Nodes]: 
https://github.com/ipfs/go-ipld-format/blob/d2e09424ddee0d7e696d01143318d32d0fb1ae63/format.go#L22-L45 +[graph-sync]: https://github.com/ipld/specs/blob/master/block-layer/graphsync/graphsync.md +[IPLD selectors]: https://github.com/ipld/specs/blob/master/selectors/selectors.md +[ipld-prime]: https://github.com/ipld/go-ipld-prime + +[rsmt2d]: https://github.com/celestia/rsmt2d + + +[p2p]: https://github.com/celestiaorg/celestia-core/tree/0eccfb24e2aa1bb9c4428e20dd7828c93f300e60/p2p +[p2p/ipld]: https://github.com/celestiaorg/celestia-core/tree/0eccfb24e2aa1bb9c4428e20dd7828c93f300e60/p2p/ipld +[celestia-core/types]: https://github.com/celestiaorg/celestia-core/tree/0eccfb24e2aa1bb9c4428e20dd7828c93f300e60/types diff --git a/docs/celestia-architecture/adr-003-application-data-retrieval.md b/docs/celestia-architecture/adr-003-application-data-retrieval.md new file mode 100644 index 0000000000..fdefa51cb8 --- /dev/null +++ b/docs/celestia-architecture/adr-003-application-data-retrieval.md @@ -0,0 +1,141 @@ +# ADR 003: Retrieving Application messages + +## Changelog + +- 2021-04-25: initial draft + +## Context + +This ADR builds on top of [ADR 002](adr-002-ipld-da-sampling.md) and will use the implemented APIs described there. +The reader should familiarize themselves at least with the high-level concepts the as well as in the [specs](https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#2d-reed-solomon-encoding-scheme). + +The academic [paper](https://arxiv.org/abs/1905.09274) describes the motivation and context for this API. +The main motivation can be quoted from section 3.3 of that paper: + +> (Property1) **Application message retrieval partitioning.** Client nodes must be able to download all of the messages relevant to the applications they use [...], without needing to downloading any messages for other applications. 
+ +> (Property2) **Application message retrieval completeness.** When client nodes download messages relevant to the applications they use [...], they must be able to verify that the messages they received are the complete set of messages relevant to their applications, for specific +blocks, and that there are no omitted messages. + + + +The main data structure that enables above properties is called a Namespaced Merkle Tree (NMT), an ordered binary Merkle tree where: +1. each node in the tree includes the range of namespaces of the messages in all descendants of each node +2. leaves in the tree are ordered by the namespace identifiers of the leaf messages + +A more formal description can be found the [specification](https://github.com/celestiaorg/celestia-specs/blob/de5f4f74f56922e9fa735ef79d9e6e6492a2bad1/specs/data_structures.md#namespace-merkle-tree). +An implementation can be found in [this repository](https://github.com/celestiaorg/nmt). + +This ADR basically describes version of the [`GetWithProof`](https://github.com/celestiaorg/nmt/blob/ddcc72040149c115f83b2199eafabf3127ae12ac/nmt.go#L193-L196) of the NMT that leverages the fact that IPFS uses content addressing and that we have implemented an [IPLD plugin](https://github.com/celestiaorg/celestia-core/tree/37502aac69d755c189df37642b87327772f4ac2a/p2p/ipld) for an NMT. + +**Note**: The APIs defined here will be particularly relevant for Optimistic Rollup (full) nodes that want to download their Rollup's data (see [celestiaorg/optimint#48](https://github.com/celestiaorg/optimint/issues/48)). +Another potential use-case of this API could be for so-called [light validator nodes](https://github.com/celestiaorg/celestia-specs/blob/master/specs/node_types.md#node-type-definitions) that want to download and replay the state-relevant portion of the block data, i.e. transactions with [reserved namespace IDs](https://github.com/celestiaorg/celestia-specs/blob/master/specs/consensus.md#reserved-namespace-ids). 
+ +## Alternative Approaches + +The approach described below will rely on IPFS' block exchange protocol (bitswap) and DHT; IPFS's implementation will be used as a black box to find peers that can serve the requested data. +This will likely be much slower than it potentially could be and for a first implementation we intentionally do not incorporate the optimizations that we could. + +We briefly mention potential optimizations for the future here: +- Use of [graphsync](https://github.com/ipld/specs/blob/5d3a3485c5fe2863d613cd9d6e18f96e5e568d16/block-layer/graphsync/graphsync.md) instead of [bitswap](https://docs.ipfs.io/concepts/bitswap/) and use of [IPLD selectors](https://github.com/ipld/specs/blob/5d3a3485c5fe2863d613cd9d6e18f96e5e568d16/design/history/exploration-reports/2018.10-selectors-design-goals.md) +- expose an API to be able to download application specific data by namespace (including proofs) with the minimal number of round-trips (e.g. finding nodes that expose an RPC endpoint like [`GetWithProof`](https://github.com/celestiaorg/nmt/blob/ddcc72040149c115f83b2199eafabf3127ae12ac/nmt.go#L193-L196)) + +## Decision + +Most discussions on this particular API happened either on calls or on other non-documented way. +We only describe the decision in this section. + +We decide to implement the simplest approach first. +We first describe the protocol informally here and explain why this fulfils (Property1) and (Property2) in the [Context](#context) section above. + +In the case that leaves with the requested namespace exist, this basically boils down to the following: traverse the tree starting from the root until finding first leaf (start) with the namespace in question, then directly request and download all leaves coming after the start until the namespace changes to a greater than the requested one again. 
+In the case that no leaves with the requested namespace exist in the tree, we traverse the tree to find the leaf in the position in the tree where the namespace would have been and download the neighbouring leaves. + +This is pretty much what the [`ProveNamespace`](https://github.com/celestiaorg/nmt/blob/ddcc72040149c115f83b2199eafabf3127ae12ac/nmt.go#L132-L146) method does but using IPFS we can simply locate and then request the leaves, and the corresponding inner proof nodes will automatically be downloaded on the way, too. + +## Detailed Design + +We define one function that returns all shares of a block belonging to a requested namespace and block (via the block's data availability header). +See [`ComputeShares`](https://github.com/celestiaorg/celestia-core/blob/1a08b430a8885654b6e020ac588b1080e999170c/types/block.go#L1371) for reference how encode the block data into namespace shares. + +```go +// RetrieveShares returns all raw data (raw shares) of the passed-in +// namespace ID nID and included in the block with the DataAvailabilityHeader dah. +func RetrieveShares( + ctx context.Context, + nID namespace.ID, + dah *types.DataAvailabilityHeader, + api coreiface.CoreAPI, +) ([][]byte, error) { + // 1. Find the row root(s) that contains the namespace ID nID + // 2. Traverse the corresponding tree(s) according to the + // above informally described algorithm and get the corresponding + // leaves (if any) + // 3. Return all (raw) shares corresponding to the nID +} + +``` + +Additionally, we define two functions that use the first one above to: +1. return all the parsed (non-padding) data with [reserved namespace IDs](https://github.com/celestiaorg/celestia-specs/blob/de5f4f74f56922e9fa735ef79d9e6e6492a2bad1/specs/consensus.md#reserved-namespace-ids): transactions, intermediate state roots, evidence. +2. 
return all application specific blobs (shares) belonging to one namespace ID parsed as a slice of Messages ([specification](https://github.com/celestiaorg/celestia-specs/blob/de5f4f74f56922e9fa735ef79d9e6e6492a2bad1/specs/data_structures.md#message) and [code](https://github.com/celestiaorg/celestia-core/blob/1a08b430a8885654b6e020ac588b1080e999170c/types/block.go#L1336)). + +The latter two methods might require moving or exporting a few currently unexported functions that (currently) live in [share_merging.go](https://github.com/celestiaorg/celestia-core/blob/1a08b430a8885654b6e020ac588b1080e999170c/types/share_merging.go#L57-L76) and could be implemented in a separate pull request. + +```go +// RetrieveStateRelevantMessages returns all state-relevant transactions +// (transactions, intermediate state roots, and evidence) included in a block +// with the DataAvailabilityHeader dah. +func RetrieveStateRelevantMessages( + ctx context.Context, + nID namespace.ID, + dah *types.DataAvailabilityHeader, + api coreiface.CoreAPI, +) (Txs, IntermediateStateRoots, EvidenceData, error) { + // like RetrieveShares but for all reserved namespaces + // additionally the shares are parsed (merged) into the + // corresponding types in the return arguments +} +``` + +```go +// RetrieveMessages returns all Messages of the passed-in +// namespace ID and included in the block with the DataAvailabilityHeader dah. +func RetrieveMessages( + ctx context.Context, + nID namespace.ID, + dah *types.DataAvailabilityHeader, + api coreiface.CoreAPI, +) (Messages, error) { + // like RetrieveShares but this additionally parsed the shares + // into the Messages type +} +``` + +## Status + +Proposed + +## Consequences + +This API will most likely be used by Rollups too. +We should document it properly and move it together with relevant parts from ADR 002 into a separate go-package. 
+ +### Positive + +- easy to implement with the existing code (see [ADR 002](https://github.com/celestiaorg/celestia-core/blob/47d6c965704e102ae877b2f4e10aeab782d9c648/docs/adr/adr-002-ipld-da-sampling.md#detailed-design)) +- resilient data retrieval via a p2p network +- dependence on a mature and well-tested code-base with a large and welcoming community + +### Negative + +- with IPFS, we inherit the fact that potentially a lot of round-trips are done until the data is fully downloaded; in other words: this could end up way slower than potentially possible +- anyone interacting with that API needs to run an IPFS node + +### Neutral + +- optimizations can happen incrementally once we have an initial working version + +## References + +We've linked to all references throughout the ADR. diff --git a/docs/celestia-architecture/adr-004-mvp-light-client.md b/docs/celestia-architecture/adr-004-mvp-light-client.md new file mode 100644 index 0000000000..4dac26b890 --- /dev/null +++ b/docs/celestia-architecture/adr-004-mvp-light-client.md @@ -0,0 +1,292 @@ +# ADR 004: Data Availability Sampling Light Client + +## Changelog + +- 2021-05-03: Initial Draft + +## Context + +We decided to augment the existing [RPC-based Tendermint light client](https://github.com/tendermint/tendermint/blob/bc643b19c48495077e0394d3e21e1d2a52c99548/light/doc.go#L2-L126) by adding the possibility to additionally validate blocks by doing Data Availability Sampling (DAS). +In general, DAS gives light clients assurance that the data behind the block header they validated is actually available in the network and hence, that state fraud proofs could be generated. +See [ADR 002](adr-002-ipld-da-sampling.md) for more context on DAS. 
+ +A great introduction on the Tendermint light client (and light clients in general) can be found in this series of [blog posts](https://medium.com/tendermint/everything-you-need-to-know-about-the-tendermint-light-client-f80d03856f98) as well as this [paper](https://arxiv.org/abs/2010.07031). + +This ADR describes the changes necessary to augment the existing Tendermint light client implementation with DAS from a UX as well as from a protocol perspective. + +## Alternative Approaches + +Ideally, the light client should not just request [signed headers](https://github.com/tendermint/tendermint/blob/bc643b19c48495077e0394d3e21e1d2a52c99548/light/doc.go#L35-L52) from [a few pre-configured peers](https://github.com/tendermint/tendermint/blob/bc643b19c48495077e0394d3e21e1d2a52c99548/light/setup.go#L51-L52) but instead also discover peers from a p2p network. +We will eventually implement this. For more context, we refer to this [issue](https://github.com/celestiaorg/celestia-core/issues/86). +This would require that the (signed) headers are provided via other means than the RPC. +See this [abandoned pull request](https://github.com/tendermint/tendermint/pull/4508) and [issue](https://github.com/tendermint/tendermint/issues/4456) in the Tendermint repository and also this [suggestion](https://github.com/celestiaorg/celestia-core/issues/86#issuecomment-831182564) by [@Wondertan](https://github.com/Wondertan) in this repository. + +For some use-cases—like DAS light validator nodes, or the light clients of a Data Availability Layer that are run by full nodes of an Optimistic Rollup—it would even make sense that the light client (passively) participates in the consensus protocol to some extent; i.e. runs a subset of the consensus reactor to Consensus messages ([Votes](https://github.com/tendermint/tendermint/blob/bc643b19c48495077e0394d3e21e1d2a52c99548/types/vote.go#L48-L59) etc.) come in as early as possible. 
+Then light clients would not need to wait for the canonical commit to be included in the next [block](https://github.com/tendermint/tendermint/blob/bc643b19c48495077e0394d3e21e1d2a52c99548/types/block.go#L48). + +For the RPC-based light client it could also make sense to add a new RPC endpoint to tendermint for clients to retrieve the [`DataAvailabilityHeader`](https://github.com/celestiaorg/celestia-core/blob/50f722a510dd2ba8e3d31931c9d83132d6318d4b/types/block.go#L52-L69) (DAHeader), or embed the DAHeader. +The [Commit](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/rpc/core/routes.go#L25) only contains the [SignedHeader](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/rpc/core/types/responses.go#L32-L36) (Header and Commit signatures). +Not all light clients will need the full DAHeader though (e.g. super-light-clients do not). + + +## Decision + +For our MVP, we [decide](https://github.com/celestiaorg/celestia-core/issues/307) to only modify the existing RPC-endpoint based light client. +This is mostly because we want to ship our MVP as quickly as possible but independently of this it makes sense to provide a familiar experience for engineers coming from the Cosmos ecosystem. + +We will later implement the above mentioned variants. +How exactly will be described in separate ADRs though. + +## Detailed Design + +From a user perspective very little changes: +the existing light client command gets an additional flag that indicates whether to run DAS or not. +Additionally, the light client operator can decide the number of successful samples to make to deem the block available (and hence valid). + +In case DAS is enabled, the light client will need to: +1. retrieve the DAHeader corresponding to the data root in the Header +2. request a parameterizable number of random samples. 
+ +If the all sampling requests succeed, the whole block is available ([with some high enough probability](https://arxiv.org/abs/1809.09044)). + +### UX + +The main change to the light client [command](https://github.com/celestiaorg/celestia-core/blob/master/cmd/tendermint/commands/light.go#L32-L104) is to add in a new flag to indicate if it should run DAS or not. +Additionally, the user can choose the number of succeeding samples required for a block to be considered available. + +```diff +=================================================================== +diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go +--- a/cmd/tendermint/commands/light.go (revision 48b043014f0243edd1e8ebad8cd0564ab9100407) ++++ b/cmd/tendermint/commands/light.go (date 1620546761822) +@@ -64,6 +64,8 @@ + dir string + maxOpenConnections int + ++ daSampling bool ++ numSamples uint32 + sequential bool + trustingPeriod time.Duration + trustedHeight int64 +@@ -101,6 +103,10 @@ + LightCmd.Flags().BoolVar(&sequential, "sequential", false, + "sequential verification. Verify all headers sequentially as opposed to using skipping verification", + ) ++ LightCmd.Flags().BoolVar(&daSampling, "da-sampling", false, ++ "data availability sampling. Verify each header (sequential verification), additionally verify data availability via data availability sampling", ++ ) ++ LightCmd.Flags().Uint32Var(&numSamples, "num-samples", 15, "Number of data availability samples until block data deemed available.") + } +``` + +For the Data Availability sampling, the light client will have to run an IPFS node. +It makes sense to make this mostly opaque to the user as everything around IPFS can be [configured](https://github.com/ipfs/go-ipfs/blob/d6322f485af222e319c893eeac51c44a9859e901/docs/config.md) in the `$IPFS_PATH`. 
+This IPFS path should simply be a sub-directory inside the light client's [directory](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/cmd/tendermint/commands/light.go#L86-L87).
+We can later add the ability to let users configure the IPFS setup in a more granular way.
+
+**Note:** DAS should only be compatible with sequential verification.
+In case a light client is parametrized to run DAS and skipping verification, the CLI should return an easy-to-understand warning or even an error explaining why this does not make sense.
+
+### Light Client Protocol with DAS
+
+#### Light Store
+
+The light client stores data in its own [badgerdb instance](https://github.com/celestiaorg/celestia-core/blob/50f722a510dd2ba8e3d31931c9d83132d6318d4b/cmd/tendermint/commands/light.go#L125) in the given directory:
+
+```go
+db, err := badgerdb.NewDB("light-client-db", dir)
+```
+
+While it is not critical for this feature, we should at least try to re-use that same DB instance for the local ipld store.
+Otherwise, we introduce yet another DB instance; something we want to avoid, especially in the long run (see [#283](https://github.com/celestiaorg/celestia-core/issues/283)).
+For the first implementation, it might still be simpler to create a separate DB instance and tackle cleaning this up in a separate pull request, e.g. together with other [instances](https://github.com/celestiaorg/celestia-core/issues/283).
+
+#### RPC
+
+No changes to the RPC endpoints are absolutely required.
+However, for convenience and ease of use, we should either add the `DAHeader` to the existing [Commit](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/rpc/core/routes.go#L25) endpoint, or, introduce a new endpoint to retrieve the `DAHeader` on demand and for a certain height or block hash.
+
+The first has the downside that not every light client needs the DAHeader. 
+The second explicitly reveals to full-nodes which clients are doing DAS and which not. + +**Implementation Note:** The additional (or modified) RPC endpoint could work as a simple first step until we implement downloading the DAHeader from a given data root in the header. +Also, the light client uses a so called [`Provider`](https://github.com/tendermint/tendermint/blob/7f30bc96f014b27fbe74a546ea912740eabdda74/light/provider/provider.go#L9-L26) to retrieve [LightBlocks](https://github.com/tendermint/tendermint/blob/7f30bc96f014b27fbe74a546ea912740eabdda74/types/light.go#L11-L16), i.e. signed headers and validator sets. +Currently, only the [`http` provider](https://github.com/tendermint/tendermint/blob/7f30bc96f014b27fbe74a546ea912740eabdda74/light/provider/http/http.go#L1) is implemented. +Hence, as _a first implementation step_, we should augment the `Provider` and the `LightBlock` to optionally include the DAHeader (details below). +In parallel but in a separate pull request, we add a separate RPC endpoint to download the DAHeader for a certain height. + +#### Store DataAvailabilityHeader + +For full nodes to be able to serve the `DataAvailabilityHeader` without having to recompute it each time, it needs to be stored somewhere. +While this is independent of the concrete serving mechanism, it is more so relevant for the RPC endpoint. +There is ongoing work to make the Tendermint Store only store Headers and the DataAvailabilityHeader in [#218](https://github.com/celestiaorg/celestia-core/pull/218/) / [#182](https://github.com/celestiaorg/celestia-core/issues/182). + +At the time writing this ADR, another pull request ([#312](https://github.com/celestiaorg/celestia-core/pull/312)) is in the works with a more isolated change that adds the `DataAvailabilityHeader` to the `BlockID`. 
+Hence, the DAHeader is [stored](https://github.com/celestiaorg/celestia-core/blob/50f722a510dd2ba8e3d31931c9d83132d6318d4b/store/store.go#L355-L367) along the [`BlockMeta`](https://github.com/celestiaorg/celestia-core/blob/50f722a510dd2ba8e3d31931c9d83132d6318d4b/types/block_meta.go#L11-L17) there.
+
+For a first implementation, we could first build on top of #312 and adapt to the changed storage API where only headers and the DAHeader are stored inside tendermint's store (as drafted in #218).
+A major downside of storing block data inside of tendermint's store as well as in the IPFS' block store is that it is not only redundantly stored data but also IO-intensive work that will slow down full nodes.
+
+
+#### DAS
+
+The changes for DAS are very simple from a high-level perspective assuming that the light client has the ability to download the DAHeader along with the required data (signed header + validator set) of a given height:
+
+Every time the light client validates a retrieved light-block, it additionally starts DAS in the background (once).
+For a DAS light client it is important to use [sequential](https://github.com/tendermint/tendermint/blob/f366ae3c875a4f4f61f37f4b39383558ac5a58cc/light/client.go#L46-L53) verification and not [skipping](https://github.com/tendermint/tendermint/blob/f366ae3c875a4f4f61f37f4b39383558ac5a58cc/light/client.go#L55-L69) verification.
+Skipping verification only works under the assumption that 2/3+1 of voting power is honest.
+The whole point of doing DAS (and state fraud proofs) is to remove that assumption.
+See also this related issue in the LL specification: [#159](https://github.com/celestiaorg/celestia-specs/issues/159).
+
+Independent of the existing implementation, there are three ways this could be implemented:
+1. 
the DAS light client only accepts a header as valid and trusts it after DAS succeeds (additionally to the tendermint verification), and it waits until DAS succeeds (or there was an error or timeout on the way)
+2. (aka 1.5) the DAS light client stages headers where the tendermint verification passes as valid and spins up DAS sampling routines in the background; the staged headers are committed as valid iff all routines successfully return in time
+3. the DAS light client optimistically accepts a header as valid and trusts it if the regular tendermint verification succeeds; the DAS is run in the background (with potentially much longer timeouts as in 1.) and after the background routine returns (or errs or times out), the already trusted headers are marked as unavailable; this might require rolling back the already trusted headers
+
+We note that from an implementation point of view 1. is not only the simplest approach, but it would also work best with the currently implemented light client design.
+It is the approach that should be implemented first.
+
+The 2. approach can be seen as an optimization where the higher latency DAS can be conducted in parallel for various heights.
+This could speed up catching-up (sequentially) if the light client went offline (shorter than the weak subjectivity time window).
+
+The 3. approach is the most general of all, but it moves the responsibility to wait or to roll back headers to the caller and hence is undesirable as it offers too much flexibility.
+
+
+#### Data Structures
+
+##### LightBlock
+
+As mentioned above, the LightBlock should optionally contain the DataAvailabilityHeader. 
+```diff +Index: types/light.go +=================================================================== +diff --git a/types/light.go b/types/light.go +--- a/types/light.go (revision 64044aa2f2f2266d1476013595aa33bb274ba161) ++++ b/types/light.go (date 1620481205049) +@@ -13,6 +13,9 @@ + type LightBlock struct { + *SignedHeader `json:"signed_header"` + ValidatorSet *ValidatorSet `json:"validator_set"` ++ ++ // DataAvailabilityHeader is only populated for DAS light clients for others it can be nil. ++ DataAvailabilityHeader *DataAvailabilityHeader `json:"data_availability_header"` + } +``` + +Alternatively, we could introduce a `DASLightBlock` that embeds a `LightBlock` and has the `DataAvailabilityHeader` as the only (non-optional) field. +This would be more explict as it is a new type. +Instead, adding a field to the existing `LightBlock`is backwards compatible and does not require any further code changes; the new type requires `To`- and `FromProto` functions at least. + +##### Provider + +The [`Provider`](https://github.com/tendermint/tendermint/blob/7f30bc96f014b27fbe74a546ea912740eabdda74/light/provider/provider.go#L9-L26) should be changed to additionally provide the `DataAvailabilityHeader` to enable DAS light clients. +Implementations of the interface need to additionally retrieve the `DataAvailabilityHeader` for the [modified LightBlock](#lightblock). +Users of the provider need to indicate this to the provider. + +We could either augment the `LightBlock` method with a flag, add a new method solely for providing the `DataAvailabilityHeader`, or, we could introduce a new method for DAS light clients. + +The latter is preferable because it is the most explicit and clear, and it still keeps places where DAS is not used without any code changes. 
+ +Hence: + +```diff +Index: light/provider/provider.go +=================================================================== +diff --git a/light/provider/provider.go b/light/provider/provider.go +--- a/light/provider/provider.go (revision 7d06ae28196e8765c9747aca9db7d2732f56cfc3) ++++ b/light/provider/provider.go (date 1620298115962) +@@ -21,6 +21,14 @@ + // error is returned. + LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) + ++ // DASLightBlock returns the LightBlock containing the DataAvailabilityHeader. ++ // Other than including the DataAvailabilityHeader it behaves exactly the same ++ // as LightBlock. ++ // ++ // It can be used by DAS light clients. ++ DASLightBlock(ctx context.Context, height int64) (*types.LightBlock, error) ++ ++ + // ReportEvidence reports an evidence of misbehavior. + ReportEvidence(context.Context, types.Evidence) error + } +``` +Alternatively, with the exact same result, we could embed the existing `Provider` into a new interface: e.g. `DASProvider` that adds this method. +This is completely equivalent as above and which approach is better will become more clear when we spent more time on the implementation. + +Regular light clients will call `LightBlock` and DAS light clients will call `DASLightBlock`. +In the first case the result will be the same as for vanilla Tendermint and in the second case the returned `LightBlock` will additionally contain the `DataAvailabilityHeader` of the requested height. + +#### Running an IPFS node + +We already have methods to [initialize](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/cmd/tendermint/commands/init.go#L116-L157) and [run](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/node/node.go#L1449-L1488) an IPFS node in place. +These need to be refactored such that they can effectively be for the light client as well. +This means: +1. 
these methods need to be exported and available in a place that does not introduce interdependence of go packages
+2. users should be able to run a light client with a single command and hence most of the initialization logic should be coupled with creating the actual IPFS node and [made independent](https://github.com/celestiaorg/celestia-core/blob/cbf1f1a4a0472373289a9834b0d33e0918237b7f/cmd/tendermint/commands/init.go#L119-L120) of the `tendermint init` command
+
+An example for 2. can be found in the IPFS [code](https://github.com/ipfs/go-ipfs/blob/cd72589cfd41a5397bb8fc9765392bca904b596a/cmd/ipfs/daemon.go#L239) itself.
+We might want to provide a slightly different default initialization though (see how this is [overridable](https://github.com/ipfs/go-ipfs/blob/cd72589cfd41a5397bb8fc9765392bca904b596a/cmd/ipfs/daemon.go#L164-L165) in the ipfs daemon cmd).
+
+We note that for operating a fully functional light client the IPFS node could be running in client mode [`dht.ModeClient`](https://github.com/libp2p/go-libp2p-kad-dht/blob/09d923fcf68218181b5cd329bf5199e767bd33c3/dht_options.go#L29-L30) but we actually want light clients to also respond to incoming queries, e.g. from other light clients.
+Hence, they should by default run in [`dht.ModeServer`](https://github.com/libp2p/go-libp2p-kad-dht/blob/09d923fcf68218181b5cd329bf5199e767bd33c3/dht_options.go#L31-L32).
+In an environment where bandwidth must be saved, or where the network conditions do not allow the server mode, we make it easy to change the default behavior.
+
+##### Client
+
+We add another [`Option`](https://github.com/tendermint/tendermint/blob/a91680efee3653e3de620f24eb8ddca1c95ce8f9/light/client.go#L43-L117) to the [`Client`](https://github.com/tendermint/tendermint/blob/a91680efee3653e3de620f24eb8ddca1c95ce8f9/light/client.go#L173) that indicates that this client does DAS.
+
+This option indicates:
+1. to do sequential verification and
+2. 
to request [`DASLightBlocks`](#lightblock) from the [provider](#provider). + +All other changes should only affect unexported methods only. + +##### ValidateAvailability + +In order for the light clients to perform DAS to validate availability, they do not need to be aware of the fact that an IPFS node is run. +Instead, we can use the existing [`ValidateAvailability`](https://github.com/celestiaorg/celestia-core/blame/master/p2p/ipld/validate.go#L23-L28) function (as defined in [ADR 002](adr-002-ipld-da-sampling.md) and implemented in [#270](https://github.com/celestiaorg/celestia-core/pull/270)). +Note that this expects an ipfs core API object `CoreAPI` to be passed in. +Using that interface has the major benefit that we could even change the requirement that the light client itself runs the IPFS node without changing most of the validation logic. +E.g., the IPFS node (with our custom IPLD plugin) could run in different process (or machine), and we could still just pass in that same `CoreAPI` interface. + +Orthogonal to this ADR, we also note that we could change all IPFS readonly methods to accept the minimal interface they actually use, namely something that implements `ResolveNode` (and maybe additionally a `NodeGetter`). + +`ValidateAvailability` needs to be called each time a header is validated. +A DAS light client will have to request the `DASLightBlock` for this as per above to be able to pass in a `DataAvailabilityHeader`. + +#### Testing + +Ideally, we add the DAS light client to the existing e2e tests. +It might be worth to catch up with some relevant changes from tendermint upstream. +In particular, [tendermint/tendermint#6196](https://github.com/tendermint/tendermint/pull/6196) and previous changes that it depends on. + +Additionally, we should provide a simple example in the documentation that walks through the DAS light client. +It would be good if the light client logs some (info) output related to DAS to provide feedback to the user. 
+ +## Status + +Proposed + +## Consequences + +### Positive + +- simple to implement and understand +- familiar to tendermint / Cosmos devs +- allows trying out the MVP without relying on the [celestia-app](https://github.com/celestiaorg/celestia-app) (instead a simple abci app like a modified [KVStore](https://github.com/celestiaorg/celestia-core/blob/42e4e8b58ebc58ebd663c114d2bcd7ab045b1c55/abci/example/kvstore/README.md) app could be used to demo the DAS light client) + +### Negative + +- light client does not discover peers +- requires the light client that currently runs simple RPC requests only to run an IPFS node +- rpc makes it extremely easy to infer which light clients are doing DAS and which not +- the initial light client implementation might still be confusing to devs familiar to tendermint/Cosmos for the reason that it does DAS (and state fraud proofs) to get rid of the underlying honest majority assumption, but it will still do all checks related to that same honest majority assumption (e.g. download validator sets, Commits and validate that > 2/3 of them signed the header) + +### Neutral + +DAS light clients need to additionally obtain the DAHeader from the data root in the header to be able to actually do DAS. + +## References + +We have linked all references above inside the text already. diff --git a/docs/celestia-architecture/adr-005-decouple-blockid-and-partsetheader.md b/docs/celestia-architecture/adr-005-decouple-blockid-and-partsetheader.md new file mode 100644 index 0000000000..1bf8fa7416 --- /dev/null +++ b/docs/celestia-architecture/adr-005-decouple-blockid-and-partsetheader.md @@ -0,0 +1,47 @@ +# ADR 005: Decouple the PartSetHeader from the BlockID + +## Changelog + +- 2021-08-01: Initial Draft + +## Context + +Celestia has multiple commits to the block data via the `DataHash` and the `PartSetHeader` in the `BlockID`. 
As stated in [#184](https://github.com/celestiaorg/lazyledger-core/issues/184), we no longer need the `PartSetHeader` for this additional commitment to the block's data. However, we are still planning to use the `PartSetHeader` for block propagation during consensus in the short-medium term. This means that we will remove the `PartSetHeader` from as many places as possible, but keep it in the `Proposal` struct. 

## Alternative Approaches

It’s worth noting that there are proposed changes to remove the `PartSetHeader` entirely, and instead use the already existing commitment to block data, the `DataAvailabilityHeader`, to propagate blocks in parallel during consensus. Discussions regarding the detailed differences entailed in each approach are documented in that ADR's PR. The current direction that is described in this ADR is significantly more conservative in its approach, but it is not strictly an alternative to other designs. This is because other designs would also require removal of the `PartSetHeader`, which is a project in and of itself due to the `BlockID`'s widespread usage throughout tendermint and the bugs that pop up when attempting to remove it. 

## Decision

While we build other better designs to experiment with, we will continue to implement the design specified here as it is not orthogonal. 
https://github.com/celestiaorg/lazyledger-core/pull/434#issuecomment-869158788 + +## Detailed Design + +- [X] Decouple the BlockID and the PartSetHeader [#441](https://github.com/celestiaorg/lazyledger-core/pull/441) +- [ ] Remove the BlockID from every possible struct other than the `Proposal` + - [X] Stop signing over the `PartSetHeader` while voting [#457](https://github.com/celestiaorg/lazyledger-core/pull/457) + - [X] Remove the `PartSetHeader` from the Header [#457](https://github.com/celestiaorg/lazyledger-core/pull/457) + - [X] Remove the `PartSetHeader` from `VoteSetBits`, `VoteSetMaj23`, and `state.State` [#479](https://github.com/celestiaorg/lazyledger-core/pull/479) + - [ ] Remove the `PartSetHeader` from other structs + + +## Status + +Proposed + +### Positive + +- Conservative and easy to implement +- Acts as a stepping stone for other better designs +- Allows us to use 64kb sized chunks, which are well tested + +### Negative + +- Not an ideal design as we still have to include an extra commitment to the block's data in the proposal + +## References + +Alternative ADR [#434](https://github.com/celestiaorg/lazyledger-core/pull/434) +Alternative implementation [#427](https://github.com/celestiaorg/lazyledger-core/pull/427) and [#443](https://github.com/celestiaorg/lazyledger-core/pull/443) +[Comment](https://github.com/celestiaorg/lazyledger-core/pull/434#issuecomment-869158788) that summarizes decision \ No newline at end of file diff --git a/docs/celestia-architecture/adr-006-row-propagation.md b/docs/celestia-architecture/adr-006-row-propagation.md new file mode 100644 index 0000000000..6fd2f3652e --- /dev/null +++ b/docs/celestia-architecture/adr-006-row-propagation.md @@ -0,0 +1,202 @@ +# ADR 006: Consensus Block Gossiping with Rows + +## Changelog +* 24.06.2021 - Initial description +* 07.07.2021 - More important details were added +* 18.08.2021 - Mention alternative approaches briefly + +## Context +It's a long story of relations between 
Celestia, Tendermint, and consensus block gossiping. Celestia's team discussed
+multiple ideas, several ADRs were made, and nothing yet was finalized. This ADR is another attempt to bring valuable
+changes into block gossiping and will hopefully be successful.
+
+Currently, we inherit the following from Tendermint. Our codebase relies on the notion of block Parts. Each Part is a
+piece of an entire serialized block. Those Parts are gossiped between nodes in consensus and committed with
+`PartSetHeader` containing a Merkle Root of the Parts. However, Parts gossiping wasn't designed for Celestia blocks.
+
+Celestia comes with a different block representation from Tendermint. It lays out Blocks as a table of data shares,
+where Rows or Columns can be and should be gossiped instead of Parts, keeping only one system-wide commitment to data.
+
+## Alternative Approaches
+### ["nah it works just don't touch it"](https://ahseeit.com//king-include/uploads/2020/11/121269295_375504380484919_2997236194077828589_n-6586327691.jpg) approach
+
+It turns out that we could fully treat the Tendermint consensus as a black box, keeping two data commitments: one for
+consensus with `PartSetHeader` and another for the world outside the consensus with `DAHeader`. 
+
+#### Pros
+* Less work
+
+### Others
+* get rid of the PartsHeader from BlockID without changing block propagation at all (see [ADR 005](https://github.com/celestiaorg/celestia-core/blob/58a3901827afbf97852d807de34a2b66f93e0eb6/docs/lazy-adr/adr-005-decouple-blockid-and-partsetheader.md#adr-005-decouple-the-partsetheader-from-the-blockid))
+* change block propagation to fixed-sized chunks but based on the ODS instead of how Parts are built currently (for this we have empirical evidence of how it performs in practice)
+* send the block as a whole (only works with smaller blocks)
+* block propagation based on sending the header and Tx-IDs and then requesting the Tx/Messages that are missing from the local mempool of a node on demand
+
+#### Cons
+* Pulls two data commitments to Celestia's specs
+* Brings ambiguity to data integrity verification
+* Controversial from software design perspective
+* Brings DOSing vector for big Blocks. Every Block would need to be represented in two formats in RAM
+* Wastes more resources on building and verifying additional commitments
+
+## Decision
+The decision is to still treat Tendermint's consensus as a black box, but with a few amendments to the gossiping mechanism:
+* Introduce `RowSet` that mimics `PartSet`.
+
+  `RowSet` is a helper structure that wraps DAHeader and tracks received Rows with their integrity against DAHeader and
+  tells its user when the block is complete and/or can be recovered. Mostly it is a helper and is not a high-level
+  concept.
+* Replace `PartSet` with `RowSet` within consensus.
+* Keep `DAHeader` in `Proposal`
+* Remove `PartSetHeader` from `Proposal`
+
+The changes above are required to implement the decision. 
At later point, other changes listed below are +likely to be implemented as a clean-up: +* Entirely removing `PartSetHeader`, as redundant data commitment +* Removing `PartSet` +* Relying on `DAHeader` instead of `PartSetHeader` + +## Detailed Design +The detailed design section demonstrates the design and supporting changes package by package. Fortunately, the +design does not affect any public API and changes are solely internal. + +### `types` +#### RowSet and Row +First and essential part is to implement `RowSet` and `Row`, fully mimicking semantics of `PartSet` and `Part` to +decrease the number of required changes. Below, implementation semantics are presented: + +```go +// Row represents a blob of multiple ExtendedDataSquare shares. +// Practically, it is half of an extended row, as other half can be recomputed. +type Row struct { +// Index is an top-to-bottom index of a Row in ExtendedDataSquare. +// NOTE: Row Index is unnecessary, as we can determine it's Index by hash from DAHeader. However, Index removal +// would bring more changes to Consensus Reactor with arguable pros of less bandwidth usage. +Index int +// The actual share blob. +Data []byte +} + +// NewRow creates new Row from flattened shares and index. +func NewRow(idx int, row [][]byte) *Row + +// RowSet wraps DAHeader and tracks added Rows with their integrity against DAHeader. +// It allows user to check whenever rsmt2d.ExtendedDataSquare can be recovered. +// +// RowSet tracks the whole ExtendedDataSquare, Where Q0 is the original block data: +// ---- ---- +// | Q0 || Q1 | +// ---- ---- +// | Q2 || Q3 | +// ---- ---- +// +// But its AddRow and GetRow methods accepts and returns only half of the Rows - Q0 and Q2. Q1 and Q3 are recomputed. +// ---- +// | Q0 | +// ---- +// | Q2 | +// ---- +// +type RowSet interface { +// NOTE: The RowSet is defined as an interface for simplicity. In practice it should be a struct with one and only +// implementation. + +// AddRow adds a Row to the set. 
It returns true with nil error in case Row was successfully added.
+// The logic for Row is:
+// * Check if it was already added
+// * Verify its size corresponds to DAHeader
+// * Extend it with erasure coding and compute a NMT Root over it
+// * Verify that the NMT Root corresponds to DAHeader Root under its Index
+// * Finally add it to set and mark as added.
+//
+AddRow(*Row) (bool, error)
+
+// GetRow returns a Row by its index, if it exists.
+GetRow(i int) *Row
+
+// Square checks if enough rows were added and returns the recomputed ExtendedDataSquare if so.
+Square() (*rsmt2d.ExtendedDataSquare, error)
+
+// other helper methods are omitted
+}
+
+// NewRowSet creates full RowSet from rsmt2d.ExtendedDataSquare to gossip it to others through GetRow.
+func NewRowSet(eds *rsmt2d.ExtendedDataSquare) *RowSet
+
+// NewRowSetFromHeader creates empty RowSet from a DAHeader to receive and verify gossiped Rows against the DAHeader
+// with AddRow.
+func NewRowSetFromHeader(dah *ipld.DataAvailabilityHeader) *RowSet
+```
+
+#### Vote
+`Vote` should include a commitment to data. Previously, it relied on `PartSetHeader` in `BlockId`, instead it relies on
+added `DAHeader`. Protobuf schema is updated accordingly.
+
+#### Proposal
+`Proposal` is extended with `NumOriginalDataShares`. This is an optimization that
+helps Validators to populate Header without counting original data shares themselves from a block received from a
+Proposer. Potentially, this introduces a vulnerability by which a Proposer can send a wrong value, leaving the populated
+Header of Validators wrong. This part of the decision is optional.
+
+### `consensus`
+#### Reactor
+##### Messages
+The decision affects two messages on consensus reactor:
+* `BlockPartMessage` -> `BlockRowMessage`
+  * Instead of `Part` it carries `Row` defined above. 
+* `NewValidBlockMessage` + * Instead of `PartSetHeader` it carries `DAHeader` + * `BitArray` of `RowSet` instead of `PartSet` + Protobuf schema for both is updated accordingly. + +##### PeerRoundState +`PeerRoundState` tracks state of each known peer in a round, specifically what commitment it has for a Block and what +chunks peer holds. The decision changes it to track `DAHeader` instead of `PartSetHeader`, along with `BitArray` of +`RowSet` instead of `PartSet`. + +##### BlockCatchup +The Reactor helps its peers to catchup if they go out of sync. Instead of sending random `Part` it now sends random +`Row` by `BlockRowMessage`. Unfortunately, that requires the Reactor to load whole Block from store. As an optimization, +an ability to load Row only from the store could be introduced at later point. + +#### State +##### RoundState +The RoundState keeps Proposal, Valid and Lock Block's data. Along with an entire Block and its Parts, the RoundState +also keeps Rows using `RowSet`. At later point, `PartSet` that tracks part can be removed. + +##### Proposal Stage +Previously, the State in proposal stage waited for all Parts to assemble the entire Block. Instead, the State waits for +the half of all Rows from a proposer and/or peers to recompute the Block's data and notifies them back that no more +needs to be sent. Also, through Rows, only minimally required amount of information is gossiped. Everything else to +assemble the full Block is collected from own chain State and Proposal. + +## Status +Proposed + +## Consequences +### Positive +* Hardening of consensus gossiping with erasure coding +* Blocks exceeding the size limit are immediately rejected on Proposal, without the need to download an entire Block. +* More control over Row message size during consensus, comparing to Part message, as last part of the block always has + unpredictable size. `DAHeader`, on the other hand, allows knowing precisely the size of Row messages. 
+* Less bandwidth usage + * Only required Block's data is gossiped. + * Merkle proofs of Parts are not sent on the wire +* Only one system-wide block data commitment schema +* We don't abandon the work we were doing for months and taking profits out of it + * PR [#287](https://github.com/celestiaorg/lazyledger-core/pull/287) + * PR [#312](https://github.com/celestiaorg/lazyledger-core/pull/312) + * PR [#427](https://github.com/celestiaorg/lazyledger-core/pull/427) + * and merged others + +### Negative +* We invest some more time(~1.5 weeks). + * Most of the work is done. Only few changes left in the implementation along with peer reviews. + +### Neutral +* Rows vs Parts on the wire + * Previously, parts were propagated with max size of 64KiB. Let's now take a Row of the largest 128x128 block in + comparison. The actual data size in such a case for the Row would be 128x256(shares_per_row*share_size)=32KiB, which + is exactly two times smaller than a Part. +* Gossiped chunks are no longer constant size. Instead, their size is proportional to the size of Block's data. +* Another step back from original Tendermint's codebases \ No newline at end of file diff --git a/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md b/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md new file mode 100644 index 0000000000..486e25f233 --- /dev/null +++ b/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md @@ -0,0 +1,234 @@ +# ADR 007: From Ukraine, with Love + +## Changelog + +- 20-08-2021: Initial Description + +## Context + +Currently, our fork of tendermint includes changes to how to erasure block data, minor changes to the header to commit +to that data, additions to serve data availability sampling, along with some miscellaneous modification to adhere to the +spec. 
Instead of incorporating all of these changes into our fork of tendermint, we will only make the strictly
+necessary changes and move the other services and their code to the new celestia-node repo. Notably, we will also refactor
+some of the remaining necessary changes to be more isolated from the rest of the tendermint codebase. Both of these
+strategies should significantly streamline pulling updates from upstream, and allow us to iterate faster since most
+changes will be isolated to celestia-node.
+
+## Decision
+
+Treat tendermint more as a "black box".
+
+## Detailed Design
+
+### Overview
+
+We keep the bare-minimum changes to tendermint in our fork, celestia-core. Where necessary and possible we augment the
+tendermint node in a separate process, via celestia-node, which communicates with the tendermint node via RPC. All data
+availability sampling logic, including all Celestia-specific networking logic not already provided by tendermint, is
+moved into celestia-node:
+
+![](./img/core-node-relation.png)
+
+The detailed design of celestia-node will be defined in the repository itself. 
+ +### Necessary changes to tendermint + +#### Changing the repo import names to celestiaorg + +- Rebrand (https://github.com/celestiaorg/celestia-core/pull/476) + +#### Changes to the README.md other basic things + +- update github templates (https://github.com/celestiaorg/celestia-core/pull/405) +- update README.md (https://github.com/celestiaorg/celestia-core/pull/10) + +#### Adding the extra types of block data + +- Update core data types (https://github.com/celestiaorg/celestia-core/pull/17) + - Create the Message/Messages types + - Proto and the tendermint version + - Create the IntermediateStateRoots type + - Proto and the tendermint version +- Data availability for evidence (https://github.com/celestiaorg/celestia-core/pull/19) + - Add both types to `types.Data` + - Modify proto + - Add `EvidenceData` to `types.Data` + +#### Add the HeaderHash to the Commit + +- Add header hash to commit(https://github.com/celestiaorg/celestia-core/pull/198) + +#### Adding the consts package in types + +#### Remove iavl as a dependency + +- remove iavl as a dependency (https://github.com/celestiaorg/celestia-core/pull/129) + +#### Using the `DataAvailabilityHeader` to calculate the DataHash + +The `DataAvailabilityHeader` struct will be used by celestia-core as well as by the celestia-node. It might make sense +to (eventually) move the struct together with all the DA-related code into a separate repository and go-module. +@Wondertan explored this as part of [#427](https://github.com/celestiaorg/celestia-core/pull/427#issue-674512464). This +way all client implementations can depend on that module without running into circular dependencies. 
Hence, we only +describe how to hash the block data here: + +- Update core types (https://github.com/celestiaorg/celestia-core/pull/17) + - Replace the `Data.Hash()` with `DAH.Hash()` + - Use DAH to fill DataHash when filling the header + - Fill the DAH when making a block to generate the data hash + +#### Add availableDataOriginalSharesUsed to the header + +- Add availableDataOriginalSharesUsed to the header (https://github.com/celestiaorg/celestia-core/pull/262) + +#### Reap some number of transactions probably using the app or some other mech + +- Enforce a minimum square size (https://github.com/celestiaorg/celestia-core/pull/282) +- Use squares with a width that is a power of two(https://github.com/celestiaorg/celestia-core/pull/331) +- Adopt reamping from the mempool to max square size (https://github.com/celestiaorg/celestia-core/issues/77) +- Proposal: Decide on a mech to pick square size and communicate that to the + app (https://github.com/celestiaorg/celestia-core/issues/454) +- Also see ABCI++ for a less hacky solution + +#### Filling the DAH using share merging and splitting + +- Compute Shares (not merged) (https://github.com/celestiaorg/celestia-core/pull/60) + - part II (not merged) (https://github.com/celestiaorg/celestia-core/pull/63) + - while this was not merged, we will need some function to compute the shares that make up the block data +- Share Splitting (https://github.com/celestiaorg/celestia-core/pull/246) + - Serialize each constituent of block data + - Split into shares + - Txs (contiguous) + - Messages (not contiguous) + - Evidence (contiguous) + - IntermediateStateRoots (contiguous) +- Combine shares into original square +- ExtendBlockData +- Generate nmt root of each row and col +- Use those roots to generate the DataHash +- Share Merging (https://github.com/celestiaorg/celestia-core/pull/261) + - Sort by namespace + - Parse each reserved type + - Parse remaining messages + +#### Add the wrapper around nmt to erasure namespaces + +- 
Implement rsmt tree wrapper for nmt (https://github.com/celestiaorg/celestia-core/pull/238)
+
+#### Add PreprocessTxs to ABCI
+
+- Add PreprocessTxs method to ABCI (https://github.com/celestiaorg/celestia-core/pull/110)
+- Add method to ABCI interface
+- Create sync and async versions
+- Add sync version to the CreateProposalBlock method of BlockExecutor
+
+#### Fill the DAH while making the block
+
+- Basic DA functionality (https://github.com/celestiaorg/celestia-core/pull/83)
+
+#### Only produce blocks on some interval
+
+- Control block times (https://github.com/tendermint/tendermint/issues/5911)
+
+#### Stop signing over the PartSetHeader
+
+- Replace canonical blockID with just a hash in the CanonicalVote
+- Replace the LastBlockID in the header with just a hash
+
+#### Optionally remove some unused code
+
+- Removing misc unused code (https://github.com/celestiaorg/celestia-core/pull/208)
+- Remove docs deployment (https://github.com/celestiaorg/celestia-core/pull/134)
+- Start deleting docs (https://github.com/celestiaorg/celestia-core/pull/209)
+- Remove tendermint-db in favor of badgerdb (https://github.com/celestiaorg/celestia-core/pull/241)
+- Delete blockchain 2 until further notice (https://github.com/celestiaorg/celestia-core/pull/309)
+- We don’t need to support using out of process apps
+
+#### Nice to Haves
+
+- More efficient hashing (https://github.com/celestiaorg/celestia-core/pull/351)
+
+We should also take this opportunity to refactor as many additions to tendermint into their own package as possible.
+This will hopefully make updating to future versions of tendermint easier. For example, when we fill the data
+availability header, instead of using a method on `Block`, it could be handled by a function that takes `types.Data` as
+input and returns the DAH, the number of shares used in the square, along with the obligatory error. 
+ +```go +func FillDataAvailabilityHeader(data types.Data) (types.DataAvailabilityHeader, numOrigDataShares, error) +``` + +We could perform a similar treatment to the `splitIntoShares` methods and their helper method `ComputeShares`. Instead +of performing the share splitting logic in those methods, we could keep it in a different package and instead call the +equivalent function to compute the shares. + +Beyond refactoring and some minor additions, we will also have to remove and revert quite a few changes to get to the +minimum desired changes specified above. + +### Changes that will need to be reverted + +#### IPLD Plugin + +- Introduction (https://github.com/celestiaorg/celestia-core/pull/144) +- Initial integration (https://github.com/celestiaorg/celestia-core/pull/152) +- Custom Multihash (https://github.com/celestiaorg/celestia-core/pull/155) +- Puting data during proposal (https://github.com/celestiaorg/celestia-core/pull/178) +- Module name (https://github.com/celestiaorg/celestia-core/pull/151) +- Update rsmt2d (https://github.com/celestiaorg/celestia-core/pull/290) +- Make plugin a package (https://github.com/celestiaorg/celestia-core/pull/294) + +#### Adding DAH to Stuff + +- Adding DAH to Proposal (https://github.com/celestiaorg/celestia-core/pull/248/files) +- Blockmeta (https://github.com/celestiaorg/celestia-core/pull/372) + +#### Embedding DAS + +- GetLeafData (https://github.com/celestiaorg/celestia-core/pull/212) +- RetrieveBlockData (https://github.com/celestiaorg/celestia-core/pull/232) +- ValidateAvailability (https://github.com/celestiaorg/celestia-core/pull/270) +- Prevent double writes to IPFS (https://github.com/celestiaorg/celestia-core/pull/271) +- Stop Pinning (https://github.com/celestiaorg/celestia-core/pull/276) +- Rework IPFS Node (https://github.com/celestiaorg/celestia-core/pull/334) +- Refactor for putting the block (https://github.com/celestiaorg/celestia-core/pull/338) +- Config for IPFS node 
(https://github.com/celestiaorg/celestia-core/pull/340)
+- IPLD Dag instead of CoreAPI (https://github.com/celestiaorg/celestia-core/pull/352)
+- Adding the DAG to the blockstore (https://github.com/celestiaorg/celestia-core/pull/356)
+- Saving and Loading using IPFS (https://github.com/celestiaorg/celestia-core/pull/374)
+- Manual Providing (https://github.com/celestiaorg/celestia-core/pull/375)
+- Refactor node provider (https://github.com/celestiaorg/celestia-core/pull/400)
+- DAS in light client workaround (https://github.com/celestiaorg/celestia-core/pull/413)
+
+#### BlockID and PartSetHeader
+
+- Decouple PartSetHeader from BlockID (https://github.com/celestiaorg/celestia-core/pull/441)
+- Stop Signing over the PartSetHeader (https://github.com/celestiaorg/celestia-core/pull/457)
+- We still don’t want to sign over the PartSetHeader, but we will not be able to use the same mechanism used in the
+  linked PR, as that way requires decoupling of the PSH from the BlockID
+- Remove PSH from some consensus messages (https://github.com/celestiaorg/celestia-core/pull/479)
+
+Note: This ADR overrides ADR 005 Decouple BlockID and the PartSetHeader. The PartSetHeader and the BlockID will mostly
+remain the same. 
This will make pulling changes from upstream much easier + +## Status + +Accepted + +## Consequences + +### Positive + +- Pulling changes from upstream is streamlined +- Separation of functionality will help us iterate faster +- Creates a great opportunity for reconsidering past design choices without fully starting from scratch +- Prepare for future designs +- Don’t have to have two p2p stacks in a single repo + +### Negative + +- Perform some computation multiple times +- Running multiple nodes instead of a single node is less convenient for node operators (but only in the case the full + celestia-node wants to participate in the consensus protocol) + +## References + +Tracking Issue #491 \ No newline at end of file diff --git a/docs/celestia-architecture/adr-template.md b/docs/celestia-architecture/adr-template.md new file mode 100644 index 0000000000..c36879bcec --- /dev/null +++ b/docs/celestia-architecture/adr-template.md @@ -0,0 +1,72 @@ +# ADR {ADR-NUMBER}: {TITLE} + +## Changelog + +- {date}: {changelog} + +## Context + +> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. + +## Alternative Approaches + +> This section contains information around alternative options that are considered before making a decision. It should contain a explanation on why the alternative approach(es) were not chosen. + +## Decision + +> This section records the decision that was made. +> It is best to record as much info as possible from the discussion that happened. This aids in not having to go back to the Pull Request to get the needed information. + +## Detailed Design + +> This section does not need to be filled in at the start of the ADR, but must be completed prior to the merging of the implementation. 
+> +> Here are some common questions that get answered as part of the detailed design: +> +> - What are the user requirements? +> +> - What systems will be affected? +> +> - What new data structures are needed, what data structures will be changed? +> +> - What new APIs will be needed, what APIs will be changed? +> +> - What are the efficiency considerations (time/space)? +> +> - What are the expected access patterns (load/throughput)? +> +> - Are there any logging, monitoring or observability needs? +> +> - Are there any security considerations? +> +> - Are there any privacy considerations? +> +> - How will the changes be tested? +> +> - If the change is large, how will the changes be broken up for ease of review? +> +> - Will these changes require a breaking (major) release? +> +> - Does this change require coordination with the Celestia fork of the SDK or celestia-app? + +## Status + +> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. Once the ADR has been implemented mark the ADR as "implemented". If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. + +{Deprecated|Proposed|Accepted|Declined} + +## Consequences + +> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. + +### Positive + +### Negative + +### Neutral + +## References + +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
+ +- {reference link} diff --git a/docs/celestia-architecture/assets/user-request.png b/docs/celestia-architecture/assets/user-request.png new file mode 100644 index 0000000000..3d04fad734 Binary files /dev/null and b/docs/celestia-architecture/assets/user-request.png differ diff --git a/docs/celestia-architecture/celestia-logo.png b/docs/celestia-architecture/celestia-logo.png new file mode 100644 index 0000000000..dce8b0b34d Binary files /dev/null and b/docs/celestia-architecture/celestia-logo.png differ diff --git a/docs/celestia-architecture/img/extended_square.png b/docs/celestia-architecture/img/extended_square.png new file mode 100644 index 0000000000..8bbf469505 Binary files /dev/null and b/docs/celestia-architecture/img/extended_square.png differ diff --git a/docs/celestia-logo.png b/docs/celestia-logo.png new file mode 100644 index 0000000000..dce8b0b34d Binary files /dev/null and b/docs/celestia-logo.png differ diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 6614cffe53..cd3804440c 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -43,7 +43,7 @@ Verify that you have the latest version of Go installed: ```bash $ go version -go version go1.16.x darwin/amd64 +go version go1.17.x darwin/amd64 ``` ## 1.2 Creating a new Go project @@ -446,7 +446,7 @@ This will populate the `go.mod` with a release number followed by a hash for Ten ```go module github.com/me/example -go 1.16 +go 1.17 require ( github.com/dgraph-io/badger v1.6.2 diff --git a/go.mod b/go.mod index 19badc3eeb..d74fdb914d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/tendermint/tendermint -go 1.16 +go 1.17 require ( github.com/BurntSushi/toml v1.1.0 @@ -8,12 +8,11 @@ require ( github.com/adlio/schema v1.3.0 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce + github.com/celestiaorg/nmt v0.8.0 + github.com/celestiaorg/rsmt2d v0.5.0 github.com/creachadair/atomicfile v0.2.5 
github.com/creachadair/taskgroup v0.3.2 github.com/creachadair/tomledit v0.0.18 - github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 @@ -46,6 +45,182 @@ require ( golang.org/x/net v0.0.0-20220412020605-290c469a71a5 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.46.0 - gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect pgregory.net/rapid v0.4.7 ) + +require ( + 4d63.com/gochecknoglobals v0.1.0 // indirect + github.com/Antonboom/errname v0.1.5 // indirect + github.com/Antonboom/nilnil v0.1.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/DataDog/zstd v1.4.1 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Microsoft/go-winio v0.5.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/OpenPeeDeeP/depguard v1.1.0 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/ashanbrown/forbidigo v1.3.0 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/blizzy78/varnamelen v0.6.1 // indirect + github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/breml/bidichk v0.2.2 // indirect + github.com/breml/errchkjson v0.2.3 // indirect + github.com/butuzov/ireturn v0.1.1 // indirect + github.com/celestiaorg/go-leopard v0.1.0 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash v1.1.0 // 
indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/charithe/durationcheck v0.0.9 // indirect + github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect + github.com/containerd/continuity v0.2.1 // indirect + github.com/daixiang0/gci v0.3.3 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/dgraph-io/badger/v2 v2.2007.2 // indirect + github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/fzipp/gocyclo v0.4.0 // indirect + github.com/go-critic/go-critic v0.6.2 // indirect + github.com/go-toolsmith/astcast v1.0.0 // indirect + github.com/go-toolsmith/astcopy v1.0.0 // indirect + github.com/go-toolsmith/astequal v1.0.1 // indirect + github.com/go-toolsmith/astfmt v1.0.0 // indirect + github.com/go-toolsmith/astp v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 // indirect + github.com/go-toolsmith/typep v1.0.2 // indirect + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + 
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect + github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.3.5 // indirect + github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/btree v1.0.0 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-version v1.4.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kulti/thelper v0.5.1 // indirect + github.com/kunwardeep/paralleltest v1.0.3 // indirect + github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/ldez/gomoddirectives v0.2.2 // indirect + github.com/ldez/tagliatelle v0.3.1 // indirect + github.com/leonklingele/grouper v1.1.0 // indirect + github.com/magiconair/properties 
v1.8.6 // indirect + github.com/maratori/testpackage v1.0.1 // indirect + github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/revive v1.1.4 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/moricho/tparallel v0.2.1 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.7.11 // indirect + github.com/nishanths/predeclared v0.2.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.0.3 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/quasilyte/go-ruleguard v0.3.15 // indirect + github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/ryancurrah/gomodguard v1.2.3 // indirect + github.com/ryanrolds/sqlclosecheck 
v0.3.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect + github.com/securego/gosec/v2 v2.10.0 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sivchari/containedctx v1.0.2 // indirect + github.com/sivchari/tenv v1.4.7 // indirect + github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/sylvia7788/contextcheck v1.0.4 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect + github.com/tdakkota/asciicheck v0.1.1 // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect + github.com/tomarrell/wrapcheck/v2 v2.5.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.5 // indirect + github.com/uudashr/gocognit v1.0.5 // indirect + github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect + gitlab.com/bosi/decorder v0.2.1 // indirect + go.etcd.io/bbolt v1.3.6 // indirect + golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.10 // 
indirect + golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect + google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + honnef.co/go/tools v0.2.2 // indirect + mvdan.cc/gofumpt v0.3.0 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect +) diff --git a/go.sum b/go.sum index 79be52c717..3943d738aa 100644 --- a/go.sum +++ b/go.sum @@ -175,6 +175,14 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/celestiaorg/go-leopard v0.1.0 h1:28z2EkvKJIez5J9CEaiiUEC+OxalRLtTGJJ1oScfE1g= +github.com/celestiaorg/go-leopard v0.1.0/go.mod h1:NtO/rjlB8dw2aq7jr06vZFKGvryQcTDXaNHelmPNOAM= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.8.0 h1:wtX7GRouLbmBe+ffnc8+cOg2UbWteM+Y1imZuZ/EeqU= +github.com/celestiaorg/nmt v0.8.0/go.mod h1:3bqzTj8xKj0DgQUpOgZzoxvtNkC3MS/hTbQ6dn8SIa0= +github.com/celestiaorg/rsmt2d v0.5.0 h1:Wa0uNZUXl8lIMJnSunjoD65ktqBedXZD0z2ZU3xKYYw= +github.com/celestiaorg/rsmt2d v0.5.0/go.mod h1:EZ+O2KdCq8xI7WFwjATLdhtMdrdClmAs2w7zENDr010= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff 
v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -454,6 +462,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -772,7 +782,6 @@ github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA= github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= @@ -1073,6 +1082,8 @@ github.com/vektra/mockery/v2 v2.12.0/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60 github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod 
h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k= +github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1091,6 +1102,11 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w= gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0= go.etcd.io/bbolt 
v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1145,6 +1161,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1152,6 +1169,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1736,8 +1754,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 558dbd4b34..24eb0150da 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -272,3 +272,8 @@ func (app *CounterApplication) Commit() abci.ResponseCommit { binary.BigEndian.PutUint64(hash, uint64(app.txCount)) return abci.ResponseCommit{Data: hash} } + +func (app *CounterApplication) PrepareProposal( + req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { + return abci.ResponsePrepareProposal{BlockData: req.BlockData} +} diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go index 5ac592f0d4..f904e9129a 100644 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ b/internal/consensus/mocks/cons_sync_reactor.go @@ -3,6 +3,8 @@ package mocks import ( + testing "testing" + mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" ) @@ -26,3 +28,13 @@ func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) { func 
(_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) { _m.Called(_a0, _a1) } + +// NewConsSyncReactor creates a new instance of ConsSyncReactor. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewConsSyncReactor(t testing.TB) *ConsSyncReactor { + mock := &ConsSyncReactor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 64a201ef6d..1e2e721d2f 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -1346,6 +1346,19 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { return } + stateMachineValidBlock, err := cs.blockExec.ProcessProposal(cs.ProposalBlock) + if err != nil { + cs.Logger.Error("state machine returned an error when trying to process proposal block", "err", err) + } + + // Vote nil if application invalidated the block + if !stateMachineValidBlock { + // Consensus says we must vote nil + logger.Error("prevote step: consensus deems this block to be mustVoteNil", "err", err) + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + // Prevote cs.ProposalBlock // NOTE: the proposal signature is validated when it is received, // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index ef3346b2a7..e45b281b90 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -3,7 +3,10 @@ package mocks import ( + testing "testing" + mock "github.com/stretchr/testify/mock" + types "github.com/tendermint/tendermint/types" ) @@ -57,3 +60,13 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } + +// NewBlockStore creates a new instance of BlockStore. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockStore(t testing.TB) *BlockStore { + mock := &BlockStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index f38c09e02e..2439fedcd7 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -202,7 +202,7 @@ func TestEvidencePoolUpdate(t *testing.T) { evidenceChainID, ) lastCommit := makeCommit(height, val.PrivKey.PubKey().Address()) - block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev}) + block := types.MakeBlock(height+1, []types.Tx{}, []types.Evidence{ev}, nil, lastCommit) // update state (partially) state.LastBlockHeight = height + 1 diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 0a12c70009..f25f5718ca 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -7,6 +7,8 @@ import ( "sync" "sync/atomic" + "crypto/sha256" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" @@ -626,6 +628,15 @@ func (mem *CListMempool) Update( // https://github.com/tendermint/tendermint/issues/3322. 
if e, ok := mem.txsMap.Load(tx.Key()); ok { mem.removeTx(tx, e.(*clist.CElement), false) + // see if the transaction is a child transaction of some parent + // transaction that exists in the mempool + } else if originalHash, _, isMalleated := types.UnwrapMalleatedTx(tx); isMalleated { + var originalKey [sha256.Size]byte + copy(originalKey[:], originalHash) + err := mem.RemoveTxByKey(originalKey) + if err != nil { + return err + } + } } } diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index 61ec543ef7..b0b5a0ef11 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -3,6 +3,7 @@ package v0 import ( "context" "crypto/rand" + "crypto/sha256" "encoding/binary" "fmt" mrand "math/rand" @@ -130,11 +131,11 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 0, -1, 0}, {20, 0, 10, 0}, {20, 10, 10, 0}, - {20, 24, 10, 1}, + {20, 28, 10, 1}, {20, 240, 5, 5}, - {20, 240, -1, 10}, - {20, 240, 10, 10}, - {20, 240, 15, 10}, + {20, 280, -1, 10}, + {20, 280, 10, 10}, + {20, 280, 15, 10}, {20, 20000, -1, 20}, {20, 20000, 5, 5}, {20, 20000, 30, 20}, @@ -169,14 +170,14 @@ func TestMempoolFilters(t *testing.T) { }{ {10, nopPreFilter, nopPostFilter, 10}, {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, - {10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10}, + {10, mempool.PreCheckMaxBytes(28), nopPostFilter, 10}, {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10}, + {10, mempool.PreCheckMaxBytes(28), mempool.PostCheckMaxGas(1), 10}, {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, } for tcIndex, tt :=
range tests { @@ -223,6 +224,29 @@ func TestMempoolUpdate(t *testing.T) { err = mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) require.NoError(t, err) } + + // 4. Removes a parent transaction after receiving a child transaction in the update + { + mp.Flush() + originalTx := []byte{1, 2, 3, 4} + malleatedTx := []byte{1, 2} + originalHash := sha256.Sum256(originalTx) + + // create the wrapped child transaction + wTx, err := types.WrapMalleatedTx(originalHash[:], malleatedTx) + require.NoError(t, err) + + // add the parent transaction to the mempool + err = mp.CheckTx(context.Background(), originalTx, nil, mempool.TxInfo{}) + require.NoError(t, err) + + // remove the parent from the mempool using the wrapped child tx + err = mp.Update(1, []types.Tx{wTx}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + + assert.Zero(t, mp.Size()) + + } } func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go index 72a72861c9..93838eb6c7 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/v1/mempool_test.go @@ -260,7 +260,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { ensurePrioritized(reapedTxs) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) - require.GreaterOrEqual(t, len(reapedTxs), 16) + require.GreaterOrEqual(t, len(reapedTxs), 15) // Reap by both transaction bytes and gas, where the size yields 31 reaped // transactions and the gas limit reaps 25 transactions. 
@@ -268,7 +268,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { ensurePrioritized(reapedTxs) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) - require.Len(t, reapedTxs, 25) + require.Len(t, reapedTxs, 23) } func TestTxMempool_ReapMaxTxs(t *testing.T) { diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 6c61741172..1ab635f53d 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -13,6 +13,8 @@ import ( p2p "github.com/tendermint/tendermint/internal/p2p" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -206,3 +208,13 @@ func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error return r0, r1 } + +// NewConnection creates a new instance of Connection. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewConnection(t testing.TB) *Connection { + mock := &Connection{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go index b905c1156b..a156b7a272 100644 --- a/internal/p2p/mocks/peer.go +++ b/internal/p2p/mocks/peer.go @@ -10,6 +10,8 @@ import ( net "net" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -332,3 +334,13 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { func (_m *Peer) Wait() { _m.Called() } + +// NewPeer creates a new instance of Peer. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewPeer(t testing.TB) *Peer { + mock := &Peer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 82bd670cbd..be5ef1d37d 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" p2p "github.com/tendermint/tendermint/internal/p2p" + + testing "testing" ) // Transport is an autogenerated mock type for the Transport type @@ -119,3 +121,13 @@ func (_m *Transport) String() string { return r0 } + +// NewTransport creates a new instance of Transport. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewTransport(t testing.TB) *Transport { + mock := &Transport{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go index 54ce61dac8..701f5eeb01 100644 --- a/internal/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -19,6 +19,8 @@ type AppConnConsensus interface { Error() error InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) + PrepareProposalSync(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) + ProcessProposalSync(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error) BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abciclient.ReqRes, error) @@ -85,6 +87,20 @@ func (app *appConnConsensus) InitChainSync( return app.appConn.InitChainSync(ctx, req) } +func (app *appConnConsensus) PrepareProposalSync( + ctx context.Context, + req types.RequestPrepareProposal, +) (*types.ResponsePrepareProposal, error) { + return app.appConn.PrepareProposalSync(ctx, req) +} + +func (app 
*appConnConsensus) ProcessProposalSync( + ctx context.Context, + req types.RequestProcessProposal, +) (*types.ResponseProcessProposal, error) { + return app.appConn.ProcessProposalSync(ctx, req) +} + func (app *appConnConsensus) BeginBlockSync( ctx context.Context, req types.RequestBeginBlock, diff --git a/internal/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go index fa93b0931e..6e9e6e8676 100644 --- a/internal/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -9,6 +9,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -146,7 +148,63 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request return r0, r1 } +// PrepareProposalSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) PrepareProposalSync(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponsePrepareProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponsePrepareProposal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessProposalSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) ProcessProposalSync(_a0 context.Context, _a1 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseProcessProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestProcessProposal) *types.ResponseProcessProposal); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*types.ResponseProcessProposal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestProcessProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // SetResponseCallback provides a mock function with given fields: _a0 func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } + +// NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewAppConnConsensus(t testing.TB) *AppConnConsensus { + mock := &AppConnConsensus{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/proxy/mocks/app_conn_mempool.go b/internal/proxy/mocks/app_conn_mempool.go index 5429d8f909..a4781cef09 100644 --- a/internal/proxy/mocks/app_conn_mempool.go +++ b/internal/proxy/mocks/app_conn_mempool.go @@ -9,6 +9,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -118,3 +120,13 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } + +// NewAppConnMempool creates a new instance of AppConnMempool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewAppConnMempool(t testing.TB) *AppConnMempool { + mock := &AppConnMempool{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/proxy/mocks/app_conn_query.go b/internal/proxy/mocks/app_conn_query.go index 47ac5bef94..3fc0013499 100644 --- a/internal/proxy/mocks/app_conn_query.go +++ b/internal/proxy/mocks/app_conn_query.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -97,3 +99,13 @@ func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) ( return r0, r1 } + +// NewAppConnQuery creates a new instance of AppConnQuery. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewAppConnQuery(t testing.TB) *AppConnQuery { + mock := &AppConnQuery{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/proxy/mocks/app_conn_snapshot.go b/internal/proxy/mocks/app_conn_snapshot.go index 0b6f10ce13..af6ccfadad 100644 --- a/internal/proxy/mocks/app_conn_snapshot.go +++ b/internal/proxy/mocks/app_conn_snapshot.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -120,3 +122,13 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.Requ return r0, r1 } + +// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewAppConnSnapshot(t testing.TB) *AppConnSnapshot { + mock := &AppConnSnapshot{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 2e7f247265..8ea1c240fe 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -1,13 +1,16 @@ package core import ( + "errors" "fmt" "sort" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/consts" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" @@ -189,6 +192,55 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*corety return coretypes.NewResultCommit(&header, commit, true), nil } +// DataCommitment collects the data roots over a provided ordered range of blocks, +// and then creates a new Merkle root of those data roots. 
+func (env *Environment) DataCommitment(ctx *rpctypes.Context, query string) (*coretypes.ResultDataCommitment, error) { + heights, err := searchBlocks(ctx, env, query) + if err != nil { + return nil, err + } + + if len(heights) > consts.DataCommitmentBlocksLimit { + return nil, fmt.Errorf("the query exceeds the limit of allowed blocks %d", consts.DataCommitmentBlocksLimit) + } else if len(heights) == 0 { + return nil, fmt.Errorf("cannot create the data commitments for an empty set of blocks") + } + + err = sortBlocks(heights, "asc") + if err != nil { + return nil, err + } + + if len(heights) > consts.DataCommitmentBlocksLimit { + return nil, fmt.Errorf("the query exceeds the limit of allowed blocks %d", consts.DataCommitmentBlocksLimit) + } + + if len(heights) == 0 { + return nil, fmt.Errorf("cannot create the data commitments for an empty set of blocks") + } + + err = sortBlocks(heights, "asc") + if err != nil { + return nil, err + } + + blockResults := fetchBlocks(env, heights, len(heights), 0) + root := hashDataRoots(blockResults) + + // Create data commitment + return &coretypes.ResultDataCommitment{DataCommitment: root}, nil +} + +// hashDataRoots hashes a list of blocks data hashes and returns their merkle root. +func hashDataRoots(blocks []*coretypes.ResultBlock) []byte { + dataRoots := make([][]byte, 0, len(blocks)) + for _, block := range blocks { + dataRoots = append(dataRoots, block.Block.DataHash) + } + root := merkle.HashFromByteSlices(dataRoots) + return root +} + // BlockResults gets ABCIResults at a given height. // If no height is provided, it will fetch results for the latest block. 
// @@ -232,6 +284,34 @@ func (env *Environment) BlockSearch( orderBy string, ) (*coretypes.ResultBlockSearch, error) { + results, err := searchBlocks(ctx, env, query) + if err != nil { + return nil, err + } + + err = sortBlocks(results, orderBy) + if err != nil { + return nil, err + } + + // paginate results + totalCount := len(results) + perPage := env.validatePerPage(perPagePtr) + + page, err := validatePage(pagePtr, perPage, totalCount) + if err != nil { + return nil, err + } + + skipCount := validateSkipCount(page, perPage) + pageSize := tmmath.MinInt(perPage, totalCount-skipCount) + + apiResults := fetchBlocks(env, results, pageSize, skipCount) + + return &coretypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil +} + +func searchBlocks(ctx *rpctypes.Context, env *Environment, query string) ([]int64, error) { if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("block searching is disabled due to no kvEventSink") } @@ -248,12 +328,12 @@ func (env *Environment) BlockSearch( } } - results, err := kvsink.SearchBlockEvents(ctx.Context(), q) - if err != nil { - return nil, err - } + return kvsink.SearchBlockEvents(ctx.Context(), q) +} - // sort results (must be done before pagination) +// sortBlocks takes a list of block heights and sorts them according to the order: "asc" or "desc". +// If `orderBy` is blank, then it is considered descending. 
+func sortBlocks(results []int64, orderBy string) error { switch orderBy { case "desc", "": sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) @@ -262,21 +342,13 @@ func (env *Environment) BlockSearch( sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) default: - return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest) - } - - // paginate results - totalCount := len(results) - perPage := env.validatePerPage(perPagePtr) - - page, err := validatePage(pagePtr, perPage, totalCount) - if err != nil { - return nil, err + return errors.New("expected order_by to be either `asc` or `desc` or empty") } + return nil +} - skipCount := validateSkipCount(page, perPage) - pageSize := tmmath.MinInt(perPage, totalCount-skipCount) - +// fetchBlocks takes a list of block heights and fetches them. +func fetchBlocks(env *Environment, results []int64, pageSize int, skipCount int) []*coretypes.ResultBlock { apiResults := make([]*coretypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { block := env.BlockStore.LoadBlock(results[i]) @@ -290,6 +362,5 @@ func (env *Environment) BlockSearch( } } } - - return &coretypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil + return apiResults } diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index 213845bf40..627c5f9eec 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -1,16 +1,22 @@ package core import ( + "bytes" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" sm "github.com/tendermint/tendermint/internal/state" + 
"github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/mocks" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/rpc/coretypes" @@ -118,3 +124,73 @@ func TestBlockResults(t *testing.T) { } } } + +func TestDataCommitmentResults(t *testing.T) { + env := &Environment{} + heights := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + + blocks := randomBlocks(int64(len(heights))) + mockstore := &mocks.BlockStore{} + for i, height := range heights { + mockstore.On("LoadBlock", height).Return(blocks[i]) + mockstore.On("LoadBlockMeta", height).Return(types.NewBlockMeta(blocks[i], nil)) + } + + mockEVS := mocks.EventSink{} + mockEVS.On("SearchBlockEvents", mock.Anything, mock.Anything).Return(heights[1:3], nil) + mockEVS.On("Type").Return(indexer.KV) + + env.EventSinks = append(env.EventSinks, &mockEVS) + env.BlockStore = mockstore + + testCases := []struct { + beginQuery int + endQuery int + expectPass bool + }{ + {1, 2, true}, + // {10, 9, false}, // TODO: mock errors? + // {0, 1000, false}, + } + + for _, tc := range testCases { + mockedQuery := fmt.Sprintf("block.height >= %d AND block.height <= %d", tc.beginQuery, tc.endQuery) + + actualCommitment, err := env.DataCommitment(&rpctypes.Context{}, mockedQuery) + if tc.expectPass { + require.Nil(t, err, "should generate the needed data commitment.") + + size := tc.endQuery - tc.beginQuery + 1 + dataRoots := make([][]byte, size) + for i := 0; i < size; i++ { + dataRoots[i] = blocks[tc.beginQuery+i].DataHash + } + expectedCommitment := merkle.HashFromByteSlices(dataRoots) + + if !bytes.Equal(expectedCommitment, actualCommitment.DataCommitment) { + t.Error("expected data commitment and actual data commitment doesn't match.") + } + } else { + assert.Error(t, err) + } + } +} + +// randomBlocks generates a set of random blocks up to the provided height. 
+func randomBlocks(height int64) []*types.Block { + blocks := make([]*types.Block, height) + for i := int64(0); i < height; i++ { + blocks[i] = randomBlock(i) + } + return blocks +} + +// randomBlock generates a Block with a certain height and random data hash. +func randomBlock(height int64) *types.Block { + return &types.Block{ + Header: types.Header{ + Height: height, + DataHash: tmrand.Bytes(32), + }, + } +} diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index 740fe3b191..b45f93c184 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -29,6 +29,7 @@ func (env *Environment) GetRoutes() RoutesMap { "block_by_hash": rpc.NewRPCFunc(env.BlockByHash, "hash", true), "block_results": rpc.NewRPCFunc(env.BlockResults, "height", true), "commit": rpc.NewRPCFunc(env.Commit, "height", true), + "data_commitment": rpc.NewRPCFunc(env.DataCommitment, "query", true), "check_tx": rpc.NewRPCFunc(env.CheckTx, "tx", true), "remove_tx": rpc.NewRPCFunc(env.RemoveTx, "txkey", false), "tx": rpc.NewRPCFunc(env.Tx, "hash,prove", true), diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 7ba2bf90c0..7e6d0ec1cf 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -9,6 +9,8 @@ import ( "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/consts" + "github.com/tendermint/tendermint/pkg/prove" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" @@ -18,7 +20,7 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. 
// More: https://docs.tendermint.com/master/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { +func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, proveTx bool) (*coretypes.ResultTx, error) { // if index is disabled, return error // N.B. The hash parameter is HexBytes so that the reflective parameter @@ -39,10 +41,18 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo height := r.Height index := r.Index - var proof types.TxProof - if prove { + var txProof types.TxProof + if proveTx { block := env.BlockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + txProof, err = prove.TxInclusion( + consts.DefaultCodec(), + block.Data, + uint(block.Data.OriginalSquareSize), + uint(r.Index), + ) + if err != nil { + return nil, err + } } return &coretypes.ResultTx{ @@ -51,7 +61,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo Index: index, TxResult: r.Result, Tx: r.Tx, - Proof: proof, + Proof: txProof, }, nil } } @@ -65,7 +75,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo func (env *Environment) TxSearch( ctx *rpctypes.Context, query string, - prove bool, + proveTx bool, pagePtr, perPagePtr *int, orderBy string, ) (*coretypes.ResultTxSearch, error) { @@ -125,9 +135,12 @@ func (env *Environment) TxSearch( r := results[i] var proof types.TxProof - if prove { + if proveTx { block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines + proof, err = prove.TxInclusion(consts.DefaultCodec(), block.Data, uint(block.Data.OriginalSquareSize), uint(r.Index)) + if err != nil { + return nil, err + } } apiResults = append(apiResults, &coretypes.ResultTx{ diff --git a/internal/state/execution.go b/internal/state/execution.go index e3dc80dca9..fb7810d373 100644 --- 
a/internal/state/execution.go +++ b/internal/state/execution.go @@ -13,6 +13,7 @@ import ( "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -99,6 +100,8 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // and txs from the mempool. The max bytes must be big enough to fit the commit. // Up to 1/10th of the block space is allcoated for maximum sized evidence. // The rest is given to txs, up to the max gas. +// +// Contract: application will not return more bytes than are sent over the wire. func (blockExec *BlockExecutor) CreateProposalBlock( height int64, state State, commit *types.Commit, @@ -110,12 +113,87 @@ func (blockExec *BlockExecutor) CreateProposalBlock( evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) + evdData := types.EvidenceData{Evidence: evidence} + pevdData, err := evdData.ToProto() + if err != nil { + // todo(evan): see if we can get rid of this panic + panic(err) + } + // Fetch a limited amount of valid txs maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) + // TODO(ismail): reaping the mempool has to happen in relation to a max + // allowed square size instead of (only) Gas / bytes + // maybe the mempool actually should track things separately + // meaning that CheckTx should already do the mapping: + // Tx -> Txs, Message + // https://github.com/tendermint/tendermint/issues/77 txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + l := len(txs) + bzs := make([][]byte, l) + for i := 0; i < l; i++ { + bzs[i] = txs[i] + } - return state.MakeBlock(height, txs, commit, evidence, proposerAddr) + preparedProposal, err := blockExec.proxyApp.PrepareProposalSync( + context.TODO(), + abci.RequestPrepareProposal{ + 
BlockData: &tmproto.Data{Txs: txs.ToSliceOfBytes(), Evidence: *pevdData}, + BlockDataSize: maxDataBytes}, + ) + if err != nil { + // The App MUST ensure that only valid (and hence 'processable') transactions + // enter the mempool. Hence, at this point, we can't have any non-processable + // transaction causing an error. + // + // Also, the App can simply skip any transaction that could cause any kind of trouble. + // Either way, we can not recover in a meaningful way, unless we skip proposing + // this block, repair what caused the error and try again. Hence, we panic on + // purpose for now. + panic(err) + } + rawNewData := preparedProposal.GetBlockData() + var txSize int + for _, tx := range rawNewData.GetTxs() { + txSize += len(tx) + + if maxDataBytes < int64(txSize) { + panic("block data exceeds max amount of allowed bytes") + } + } + + newData, err := types.DataFromProto(rawNewData) + if err != nil { + // todo(evan): see if we can get rid of this panic + panic(err) + } + + return state.MakeBlock( + height, + newData.Txs, + commit, + newData.Evidence.Evidence, + newData.Messages.MessagesList, + proposerAddr, + ) +} + +func (blockExec *BlockExecutor) ProcessProposal( + block *types.Block, +) (bool, error) { + ctx := context.Background() + req := abci.RequestProcessProposal{ + Txs: block.Data.Txs.ToSliceOfBytes(), + Header: *block.Header.ToProto(), + } + + resp, err := blockExec.proxyApp.ProcessProposalSync(ctx, req) + if err != nil { + return false, ErrInvalidBlock(err) + } + + return resp.IsOK(), nil } // ValidateBlock validates the given block against the given state. 
@@ -544,11 +622,22 @@ func fireEvents( } for i, tx := range block.Data.Txs { + var txHash []byte + var rawTx []byte + if originalHash, malleatedTx, ismalleated := types.UnwrapMalleatedTx(tx); ismalleated { + txHash = originalHash + rawTx = malleatedTx + } else { + txHash = tx.Hash() + rawTx = tx + } + if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ - Height: block.Height, - Index: uint32(i), - Tx: tx, - Result: *(abciResponses.DeliverTxs[i]), + Height: block.Height, + Index: uint32(i), + Tx: rawTx, + Result: *(abciResponses.DeliverTxs[i]), + OriginalHash: txHash, }}); err != nil { logger.Error("failed publishing event TX", "err", err) } diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index a66b677f94..3761bd6e30 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -22,6 +22,7 @@ import ( "github.com/tendermint/tendermint/internal/state/mocks" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" @@ -215,6 +216,38 @@ func TestBeginBlockByzantineValidators(t *testing.T) { assert.Equal(t, abciEv, app.ByzantineValidators) } +func TestProcessProposal(t *testing.T) { + height := 1 + runTest := func(txs types.Txs, expectAccept bool) { + app := &testApp{} + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + state, stateDB, _ := makeState(1, height) + stateStore := sm.NewStore(stateDB) + + blockStore := store.NewBlockStore(dbm.NewMemDB()) + + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mmock.Mempool{}, sm.EmptyEvidencePool{}, 
blockStore) + + block := sf.MakeBlock(state, int64(height), new(types.Commit)) + block.Txs = txs + acceptBlock, err := blockExec.ProcessProposal(block) + require.Nil(t, err) + require.Equal(t, expectAccept, acceptBlock) + } + goodTxs := factory.MakeTenTxs(int64(height)) + runTest(goodTxs, true) + // testApp has process proposal fail if any tx is 0-len + badTxs := factory.MakeTenTxs(int64(height)) + badTxs[0] = types.Tx{} + runTest(badTxs, false) +} + func TestValidateValidatorUpdates(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey() diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 0cedebb008..2fecf13e07 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -58,7 +58,7 @@ func makeAndCommitGoodBlock( func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) { - block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, nil, proposerAddr) if err := blockExec.ValidateBlock(state, block); err != nil { return state, types.BlockID{}, err } @@ -304,3 +304,12 @@ func (app *testApp) Commit() abci.ResponseCommit { func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { return } + +func (app *testApp) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal { + for _, tx := range req.Txs { + if len(tx) == 0 { + return abci.ResponseProcessProposal{Result: abci.ResponseProcessProposal_REJECT} + } + } + return abci.ResponseProcessProposal{Result: abci.ResponseProcessProposal_ACCEPT} +} diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index 98b32e9350..82776483bf 100644 --- 
a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -12,6 +12,8 @@ import ( tenderminttypes "github.com/tendermint/tendermint/types" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -165,3 +167,13 @@ func (_m *EventSink) Type() indexer.EventSinkType { return r0 } + +// NewEventSink creates a new instance of EventSink. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewEventSink(t testing.TB) *EventSink { + mock := &EventSink{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go index 4bcff958b0..0cb916c142 100644 --- a/internal/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,6 +12,7 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/tmhash" indexer "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/pubsub/query/syntax" @@ -68,7 +69,12 @@ func (txi *TxIndex) Index(results []*abci.TxResult) error { defer b.Close() for _, result := range results { - hash := types.Tx(result.Tx).Hash() + var hash []byte + if len(result.OriginalHash) == tmhash.Size { + hash = result.OriginalHash + } else { + hash = types.Tx(result.Tx).Hash() + } // index tx by events err := txi.indexEvents(result, hash, b) diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index 9bb8bfb7bb..59c93e7304 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -65,6 +65,49 @@ func TestTxIndex(t *testing.T) { assert.True(t, proto.Equal(txResult2, loadedTxResult2)) } +func TestMalleatedTxIndex(t *testing.T) { + type test struct { + tx types.Tx + 
originalHash []byte + expectedTx []byte + } + originalTx1 := types.Tx([]byte("ORIGINAL_TX")) + malleatedTx1 := types.Tx([]byte("MALLEATED_TX")) + + tests := []test{ + // we expect to get the malleated tx returned when searching using the original hash + { + tx: malleatedTx1, + originalHash: originalTx1.Hash(), + expectedTx: malleatedTx1, + }, + } + + indexer := NewTxIndex(dbm.NewMemDB()) + + for i, tt := range tests { + + txResult := &abci.TxResult{ + Height: int64(i), + Index: 0, + Tx: tt.tx, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, Log: "", Events: nil, + }, + OriginalHash: tt.originalHash, + } + + err := indexer.Index([]*abci.TxResult{txResult}) + require.NoError(t, err) + + loadedTxResult, err := indexer.Get(tt.originalHash) + require.NoError(t, err) + require.NotNil(t, loadedTxResult) + assert.Equal(t, tt.expectedTx, loadedTxResult.Tx) + } +} + func TestTxSearch(t *testing.T) { indexer := NewTxIndex(dbm.NewMemDB()) diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index 563183437f..7cc7fa883c 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -5,6 +5,8 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -208,3 +210,13 @@ func (_m *BlockStore) Size() int64 { return r0 } + +// NewBlockStore creates a new instance of BlockStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewBlockStore(t testing.TB) *BlockStore { + mock := &BlockStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 8bf4a9b64b..96f9b32080 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -3,8 +3,11 @@ package mocks import ( + testing "testing" + mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" + types "github.com/tendermint/tendermint/types" ) @@ -68,3 +71,13 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64 func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { _m.Called(_a0, _a1) } + +// NewEvidencePool creates a new instance of EvidencePool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewEvidencePool(t testing.TB) *EvidencePool { + mock := &EvidencePool{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 02c69d3e05..9b41f3c1bc 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -7,6 +7,8 @@ import ( state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -186,3 +188,13 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet return r0 } + +// NewStore creates a new instance of Store. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewStore(t testing.TB) *Store { + mock := &Store{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/state.go b/internal/state/state.go index 6a1ea3c65b..ea29d46cde 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -257,11 +257,12 @@ func (state State) MakeBlock( txs []types.Tx, commit *types.Commit, evidence []types.Evidence, + messages []types.Message, proposerAddress []byte, ) (*types.Block, *types.PartSet) { // Build base block with block data. - block := types.MakeBlock(height, txs, commit, evidence) + block := types.MakeBlock(height, txs, evidence, messages, commit) // Set time. var timestamp time.Time diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index dfcf5ebd92..244766e748 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -41,6 +41,7 @@ func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block { factory.MakeTenTxs(state.LastBlockHeight), c, nil, + nil, state.Validators.GetProposer().Address, ) return block @@ -61,5 +62,5 @@ func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta * lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) } - return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) + return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, nil, state.Validators.GetProposer().Address) } diff --git a/internal/state/tx_filter_test.go b/internal/state/tx_filter_test.go index 27af28a40c..df13eda5e6 100644 --- a/internal/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -22,8 +22,8 @@ func TestTxFilter(t *testing.T) { tx types.Tx isErr bool }{ - {types.Tx(tmrand.Bytes(2155)), false}, - {types.Tx(tmrand.Bytes(2156)), true}, + {types.Tx(tmrand.Bytes(2106)), false}, + {types.Tx(tmrand.Bytes(2152)), true}, {types.Tx(tmrand.Bytes(3000)), true}, } diff --git 
a/internal/state/validation_test.go b/internal/state/validation_test.go index eb0cebbb73..85f7b928cc 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -278,7 +278,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, nil, proposerAddr) err := blockExec.ValidateBlock(state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index b8d6816310..582ebcd9c4 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -8,6 +8,8 @@ import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -82,3 +84,13 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } + +// NewStateProvider creates a new instance of StateProvider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewStateProvider(t testing.TB) *StateProvider { + mock := &StateProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index aa36fa2d34..343ba1e7e2 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -51,3 +53,13 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro return r0 } + +// NewProvider creates a new instance of Provider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewProvider(t testing.TB) *Provider { + mock := &Provider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/light/proxy/routes.go b/light/proxy/routes.go index ac2e8b5dfb..66747fd1f6 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -30,6 +30,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", true), "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", true), "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", true), + "data_commitment": rpcserver.NewRPCFunc(makeDataCommitmentFunc(c), "query", true), "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", true), "tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by", false), "block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by", false), @@ -151,6 +152,20 @@ func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { } } +type rpcDataCommitmentFunc func( + ctx *rpctypes.Context, + query string, +) (*coretypes.ResultDataCommitment, error) + +func makeDataCommitmentFunc(c *lrpc.Client) 
rpcDataCommitmentFunc { + return func( + ctx *rpctypes.Context, + query string, + ) (*coretypes.ResultDataCommitment, error) { + return c.DataCommitment(ctx.Context(), query) + } +} + type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) func makeTxFunc(c *lrpc.Client) rpcTxFunc { diff --git a/light/rpc/client.go b/light/rpc/client.go index 1852297c73..62dce3e3f6 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -503,6 +503,10 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCo }, nil } +func (c *Client) DataCommitment(ctx context.Context, query string) (*coretypes.ResultDataCommitment, error) { + return c.next.DataCommitment(ctx, query) +} + // Tx calls rpcclient#Tx method and then verifies the proof if such was // requested. func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { @@ -517,13 +521,18 @@ func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*co } // Update the light client if we're behind. - l, err := c.updateLightClientIfNeededTo(ctx, &res.Height) + _, err = c.updateLightClientIfNeededTo(ctx, &res.Height) if err != nil { return nil, err } + valid := res.Proof.VerifyProof() + if !valid { + err = errors.New("proof for transaction inclusion could not be verified") + } + // Validate the proof. - return res, res.Proof.Validate(l.DataHash) + return res, err } func (c *Client) TxSearch( diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index cc32cf6494..0a8b35dd2f 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + time "time" types "github.com/tendermint/tendermint/types" @@ -99,3 +101,13 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } + +// NewLightClient creates a new instance of LightClient. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewLightClient(t testing.TB) *LightClient { + mock := &LightClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh index 0960f8962c..ec9b9de56f 100644 --- a/networks/remote/integration.sh +++ b/networks/remote/integration.sh @@ -10,8 +10,8 @@ sudo apt-get upgrade -y sudo apt-get install -y jq unzip python-pip software-properties-common make # get and unpack golang -curl -O https://dl.google.com/go/go1.16.5.linux-amd64.tar.gz -tar -xvf go1.16.5.linux-amd64.tar.gz +curl -O https://dl.google.com/go/go1.17.1.linux-amd64.tar.gz +tar -xvf go1.17.1.linux-amd64.tar.gz ## move binary and add to path mv go /usr/local diff --git a/node/node_test.go b/node/node_test.go index 305f9b873c..735a094344 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -345,7 +345,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) - tx := tmrand.Bytes(txLength - 4) // to account for the varint + tx := tmrand.Bytes(txLength - 4 - 5) // to account for the varint err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) assert.NoError(t, err) @@ -406,7 +406,7 @@ func TestMaxProposalBlockSize(t *testing.T) { // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) - tx := tmrand.Bytes(txLength - 6) // to account for the varint + tx := tmrand.Bytes(txLength - 9) // to account for the varint err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) assert.NoError(t, err) // now produce more txs than what a normal block can hold with 10 smaller txs @@ -483,7 +483,7 @@ func TestMaxProposalBlockSize(t *testing.T) { require.Equal(t, 
int64(pb.Header.Size()), types.MaxHeaderBytes) require.Equal(t, int64(pb.LastCommit.Size()), types.MaxCommitBytes(types.MaxVotesCount)) // make sure that the block is less than the max possible size - assert.Equal(t, int64(pb.Size()), maxBytes) + assert.LessOrEqual(t, int64(pb.Size()), maxBytes) // because of the proto overhead we expect the part set bytes to be equal or // less than the pb block size assert.LessOrEqual(t, partSet.ByteSize(), int64(pb.Size())) diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go new file mode 100644 index 0000000000..165664f31a --- /dev/null +++ b/pkg/consts/consts.go @@ -0,0 +1,81 @@ +package consts + +import ( + "crypto/sha256" + + "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/rsmt2d" +) + +// This contains all constants of: +// https://github.com/celestiaorg/celestia-specs/blob/master/specs/consensus.md#constants +const ( + // ShareSize is the size of a share (in bytes). + // see: https://github.com/celestiaorg/celestia-specs/blob/master/specs/consensus.md#constants + ShareSize = 256 + + // NamespaceSize is the namespace size in bytes. + NamespaceSize = 8 + + // ShareReservedBytes is the reserved bytes for contiguous appends. + ShareReservedBytes = 1 + + // TxShareSize is the number of bytes usable for tx/evidence/ISR shares. + TxShareSize = ShareSize - NamespaceSize - ShareReservedBytes + // MsgShareSize is the number of bytes usable for message shares. + MsgShareSize = ShareSize - NamespaceSize + + // MaxSquareSize is the maximum number of + // rows/columns of the original data shares in square layout. + // Corresponds to AVAILABLE_DATA_ORIGINAL_SQUARE_MAX in the spec. + // 128*128*256 = 4 Megabytes + // TODO(ismail): settle on a proper max square + // if the square size is larger than this, the block producer will panic + MaxSquareSize = 128 + // MaxShareCount is the maximum number of shares allowed in the original data square. + // if there are more shares than this, the block producer will panic. 
+ MaxShareCount = MaxSquareSize * MaxSquareSize + + // MinSquareSize depicts the smallest original square width. A square size smaller than this will + // cause block producer to panic + MinSquareSize = 1 + // MinshareCount is the minimum shares required in an original data square. + MinSharecount = MinSquareSize * MinSquareSize +) + +var ( + // See spec for further details on the types of available data + // https://github.com/celestiaorg/celestia-specs/blob/master/specs/consensus.md#reserved-namespace-ids + // https://github.com/celestiaorg/celestia-specs/blob/de5f4f74f56922e9fa735ef79d9e6e6492a2bad1/specs/data_structures.md#availabledata + + // TxNamespaceID is the namespace reserved for transaction data + TxNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 1} + // IntermediateStateRootsNamespaceID is the namespace reserved for + // intermediate state root data + // TODO(liamsi): code commented out but kept intentionally. + // IntermediateStateRootsNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 2} + + // EvidenceNamespaceID is the namespace reserved for evidence + EvidenceNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 3} + + // MaxReservedNamespace is the lexicographically largest namespace that is + // reserved for protocol use. It is derived from NAMESPACE_ID_MAX_RESERVED + // https://github.com/celestiaorg/celestia-specs/blob/master/specs/consensus.md#constants + MaxReservedNamespace = namespace.ID{0, 0, 0, 0, 0, 0, 0, 255} + // TailPaddingNamespaceID is the namespace ID for tail padding. 
All data + // with this namespace will be ignored + TailPaddingNamespaceID = namespace.ID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE} + // ParitySharesNamespaceID indicates that share contains erasure data + ParitySharesNamespaceID = namespace.ID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} + + // NewBaseHashFunc change accordingly if another hash.Hash should be used as a base hasher in the NMT: + NewBaseHashFunc = sha256.New + + // DefaultCodec is the default codec creator used for data erasure + // TODO(ismail): for better efficiency and a larger number shares + // we should switch to the rsmt2d.LeopardFF16 codec: + DefaultCodec = rsmt2d.NewRSGF8Codec + + // DataCommitmentBlocksLimit is the limit to the number of blocks we can generate a data commitment for. + DataCommitmentBlocksLimit = 1000 +) diff --git a/pkg/da/data_availability_header.go b/pkg/da/data_availability_header.go new file mode 100644 index 0000000000..61e1f3c0c3 --- /dev/null +++ b/pkg/da/data_availability_header.go @@ -0,0 +1,206 @@ +package da + +import ( + "bytes" + "errors" + "fmt" + + "github.com/celestiaorg/rsmt2d" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/pkg/consts" + "github.com/tendermint/tendermint/pkg/wrapper" + daproto "github.com/tendermint/tendermint/proto/tendermint/da" +) + +const ( + maxExtendedSquareWidth = consts.MaxSquareSize * 2 + minExtendedSquareWidth = consts.MinSquareSize * 2 +) + +// DataAvailabilityHeader (DAHeader) contains the row and column roots of the erasure +// coded version of the data in Block.Data. +// Therefor the original Block.Data is arranged in a +// k × k matrix, which is then "extended" to a +// 2k × 2k matrix applying multiple times Reed-Solomon encoding. 
+// For details see Section 5.2: https://arxiv.org/abs/1809.09044 +// or the Celestia specification: +// https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#availabledataheader +// Note that currently we list row and column roots in separate fields +// (different from the spec). +type DataAvailabilityHeader struct { + // RowRoot_j = root((M_{j,1} || M_{j,2} || ... || M_{j,2k} )) + RowsRoots [][]byte `json:"row_roots"` + // ColumnRoot_j = root((M_{1,j} || M_{2,j} || ... || M_{2k,j} )) + ColumnRoots [][]byte `json:"column_roots"` + // cached result of Hash() not to be recomputed + hash []byte +} + +// NewDataAvailabilityHeader generates a DataAvailability header using the provided square size and shares +func NewDataAvailabilityHeader(eds *rsmt2d.ExtendedDataSquare) DataAvailabilityHeader { + // generate the row and col roots using the EDS + dah := DataAvailabilityHeader{ + RowsRoots: eds.RowRoots(), + ColumnRoots: eds.ColRoots(), + } + + // generate the hash of the data using the new roots + dah.Hash() + + return dah +} + +func ExtendShares(squareSize uint64, shares [][]byte) (*rsmt2d.ExtendedDataSquare, error) { + // Check that square size is with range + if squareSize < consts.MinSquareSize || squareSize > consts.MaxSquareSize { + return nil, fmt.Errorf( + "invalid square size: min %d max %d provided %d", + consts.MinSquareSize, + consts.MaxSquareSize, + squareSize, + ) + } + // check that valid number of shares have been provided + if squareSize*squareSize != uint64(len(shares)) { + return nil, fmt.Errorf( + "must provide valid number of shares for square size: got %d wanted %d", + len(shares), + squareSize*squareSize, + ) + } + tree := wrapper.NewErasuredNamespacedMerkleTree(squareSize) + return rsmt2d.ComputeExtendedDataSquare(shares, consts.DefaultCodec(), tree.Constructor) +} + +// String returns hex representation of merkle hash of the DAHeader. 
+func (dah *DataAvailabilityHeader) String() string { + if dah == nil { + return "" + } + return fmt.Sprintf("%X", dah.Hash()) +} + +// Equals checks equality of two DAHeaders. +func (dah *DataAvailabilityHeader) Equals(to *DataAvailabilityHeader) bool { + return bytes.Equal(dah.Hash(), to.Hash()) +} + +// Hash computes and caches the merkle root of the row and column roots. +func (dah *DataAvailabilityHeader) Hash() []byte { + if dah == nil { + return merkle.HashFromByteSlices(nil) + } + if len(dah.hash) != 0 { + return dah.hash + } + + colsCount := len(dah.ColumnRoots) + rowsCount := len(dah.RowsRoots) + slices := make([][]byte, colsCount+rowsCount) + for i, rowRoot := range dah.RowsRoots { + slices[i] = rowRoot + } + for i, colRoot := range dah.ColumnRoots { + slices[i+colsCount] = colRoot + } + // The single data root is computed using a simple binary merkle tree. + // Effectively being root(rowRoots || columnRoots): + dah.hash = merkle.HashFromByteSlices(slices) + return dah.hash +} + +func (dah *DataAvailabilityHeader) ToProto() (*daproto.DataAvailabilityHeader, error) { + if dah == nil { + return nil, errors.New("nil DataAvailabilityHeader") + } + + dahp := new(daproto.DataAvailabilityHeader) + dahp.RowRoots = dah.RowsRoots + dahp.ColumnRoots = dah.ColumnRoots + return dahp, nil +} + +func DataAvailabilityHeaderFromProto(dahp *daproto.DataAvailabilityHeader) (dah *DataAvailabilityHeader, err error) { + if dahp == nil { + return nil, errors.New("nil DataAvailabilityHeader") + } + + dah = new(DataAvailabilityHeader) + dah.RowsRoots = dahp.RowRoots + dah.ColumnRoots = dahp.ColumnRoots + + return dah, dah.ValidateBasic() +} + +// ValidateBasic runs stateless checks on the DataAvailabilityHeader. 
+func (dah *DataAvailabilityHeader) ValidateBasic() error { + if dah == nil { + return errors.New("nil data availability header is not valid") + } + if len(dah.ColumnRoots) < minExtendedSquareWidth || len(dah.RowsRoots) < minExtendedSquareWidth { + return fmt.Errorf( + "minimum valid DataAvailabilityHeader has at least %d row and column roots", + minExtendedSquareWidth, + ) + } + if len(dah.ColumnRoots) > maxExtendedSquareWidth || len(dah.RowsRoots) > maxExtendedSquareWidth { + return fmt.Errorf( + "maximum valid DataAvailabilityHeader has at most %d row and column roots", + maxExtendedSquareWidth, + ) + } + if len(dah.ColumnRoots) != len(dah.RowsRoots) { + return fmt.Errorf( + "unequal number of row and column roots: row %d col %d", + len(dah.RowsRoots), + len(dah.ColumnRoots), + ) + } + if err := validateHash(dah.hash); err != nil { + return fmt.Errorf("wrong hash: %v", err) + } + + return nil +} + +func (dah *DataAvailabilityHeader) IsZero() bool { + if dah == nil { + return true + } + return len(dah.ColumnRoots) == 0 || len(dah.RowsRoots) == 0 +} + +// tail is filler for all tail padded shares +// it is allocated once and used everywhere +var tailPaddingShare = append( + append(make([]byte, 0, consts.ShareSize), consts.TailPaddingNamespaceID...), + bytes.Repeat([]byte{0}, consts.ShareSize-consts.NamespaceSize)..., +) + +// MinDataAvailabilityHeader returns the minimum valid data availability header. +// It is equal to the data availability header for an empty block +func MinDataAvailabilityHeader() DataAvailabilityHeader { + shares := make([][]byte, consts.MinSharecount) + for i := 0; i < consts.MinSharecount; i++ { + shares[i] = tailPaddingShare + } + eds, err := ExtendShares(consts.MinSquareSize, shares) + if err != nil { + panic(err) + } + dah := NewDataAvailabilityHeader(eds) + return dah +} + +// validateHash returns an error if the hash is not empty, but its +// size != tmhash.Size. 
copy pasted from `types` package as to not import +func validateHash(h []byte) error { + if len(h) > 0 && len(h) != tmhash.Size { + return fmt.Errorf("expected size to be %d bytes, got %d bytes", + tmhash.Size, + len(h), + ) + } + return nil +} diff --git a/pkg/da/data_availability_header_test.go b/pkg/da/data_availability_header_test.go new file mode 100644 index 0000000000..3e16f1019c --- /dev/null +++ b/pkg/da/data_availability_header_test.go @@ -0,0 +1,230 @@ +package da + +import ( + "bytes" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/consts" +) + +func TestNilDataAvailabilityHeaderHashDoesntCrash(t *testing.T) { + // This follows RFC-6962, i.e. `echo -n '' | sha256sum` + var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, + 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55} + + assert.Equal(t, emptyBytes, (*DataAvailabilityHeader)(nil).Hash()) + assert.Equal(t, emptyBytes, new(DataAvailabilityHeader).Hash()) +} + +func TestMinDataAvailabilityHeader(t *testing.T) { + dah := MinDataAvailabilityHeader() + expectedHash := []byte{ + 0x7b, 0x57, 0x8b, 0x35, 0x1b, 0x1b, 0xb, 0xbd, 0x70, 0xbb, 0x35, 0x0, 0x19, 0xeb, 0xc9, 0x64, + 0xc4, 0x4a, 0x14, 0xa, 0x37, 0xef, 0x71, 0x5b, 0x55, 0x2a, 0x7f, 0x8f, 0x31, 0x5a, 0xcd, 0x19, + } + require.Equal(t, expectedHash, dah.hash) + require.NoError(t, dah.ValidateBasic()) + // important note: also see the types.TestEmptyBlockDataAvailabilityHeader test + // which ensures that empty block data results in the minimum data availability + // header +} + +func TestNewDataAvailabilityHeader(t *testing.T) { + type test struct { + name string + expectedHash []byte + squareSize uint64 + shares [][]byte + } + + tests := []test{ + { + name: "typical", + expectedHash: []byte{ + 0xfe, 0x9c, 0x6b, 0xd8, 0xe5, 0x7c, 0xd1, 0x5d, 
0x1f, 0xd6, 0x55, 0x7e, 0x87, 0x7d, 0xd9, 0x7d, + 0xdb, 0xf2, 0x66, 0xfa, 0x60, 0x24, 0x2d, 0xb3, 0xa0, 0x9c, 0x4f, 0x4e, 0x5b, 0x2a, 0x2c, 0x2a, + }, + squareSize: 2, + shares: generateShares(4, 1), + }, + { + name: "max square size", + expectedHash: []byte{ + 0xe2, 0x87, 0x23, 0xd0, 0x2d, 0x54, 0x25, 0x5f, 0x79, 0x43, 0x8e, 0xfb, 0xb7, 0xe8, 0xfa, 0xf5, + 0xbf, 0x93, 0x50, 0xb3, 0x64, 0xd0, 0x4f, 0xa7, 0x7b, 0xb1, 0x83, 0x3b, 0x8, 0xba, 0xd3, 0xa4, + }, + squareSize: consts.MaxSquareSize, + shares: generateShares(consts.MaxSquareSize*consts.MaxSquareSize, 99), + }, + } + + for _, tt := range tests { + tt := tt + eds, err := ExtendShares(tt.squareSize, tt.shares) + require.NoError(t, err) + resdah := NewDataAvailabilityHeader(eds) + require.Equal(t, tt.squareSize*2, uint64(len(resdah.ColumnRoots)), tt.name) + require.Equal(t, tt.squareSize*2, uint64(len(resdah.RowsRoots)), tt.name) + require.Equal(t, tt.expectedHash, resdah.hash, tt.name) + } +} + +func TestExtendShares(t *testing.T) { + type test struct { + name string + expectedErr bool + squareSize uint64 + shares [][]byte + } + + tests := []test{ + { + name: "too large square size", + expectedErr: true, + squareSize: consts.MaxSquareSize + 1, + shares: generateShares((consts.MaxSquareSize+1)*(consts.MaxSquareSize+1), 1), + }, + { + name: "invalid number of shares", + expectedErr: true, + squareSize: 2, + shares: generateShares(5, 1), + }, + } + + for _, tt := range tests { + tt := tt + eds, err := ExtendShares(tt.squareSize, tt.shares) + if tt.expectedErr { + require.NotNil(t, err) + continue + } + require.NoError(t, err) + require.Equal(t, tt.squareSize*2, eds.Width(), tt.name) + } +} + +func TestDataAvailabilityHeaderProtoConversion(t *testing.T) { + type test struct { + name string + dah DataAvailabilityHeader + } + + shares := generateShares(consts.MaxSquareSize*consts.MaxSquareSize, 1) + eds, err := ExtendShares(consts.MaxSquareSize, shares) + require.NoError(t, err) + bigdah := 
NewDataAvailabilityHeader(eds) + + tests := []test{ + { + name: "min", + dah: MinDataAvailabilityHeader(), + }, + { + name: "max", + dah: bigdah, + }, + } + + for _, tt := range tests { + tt := tt + pdah, err := tt.dah.ToProto() + require.NoError(t, err) + resDah, err := DataAvailabilityHeaderFromProto(pdah) + require.NoError(t, err) + resDah.Hash() // calc the hash to make the comparisons fair + require.Equal(t, tt.dah, *resDah, tt.name) + } + +} + +func Test_DAHValidateBasic(t *testing.T) { + type test struct { + name string + dah DataAvailabilityHeader + expectErr bool + errStr string + } + + shares := generateShares(consts.MaxSquareSize*consts.MaxSquareSize, 1) + eds, err := ExtendShares(consts.MaxSquareSize, shares) + require.NoError(t, err) + bigdah := NewDataAvailabilityHeader(eds) + + // make a mutant dah that has too many roots + var tooBigDah DataAvailabilityHeader + tooBigDah.ColumnRoots = make([][]byte, consts.MaxSquareSize*consts.MaxSquareSize) + tooBigDah.RowsRoots = make([][]byte, consts.MaxSquareSize*consts.MaxSquareSize) + copy(tooBigDah.ColumnRoots, bigdah.ColumnRoots) + copy(tooBigDah.RowsRoots, bigdah.RowsRoots) + tooBigDah.ColumnRoots = append(tooBigDah.ColumnRoots, bytes.Repeat([]byte{1}, 32)) + tooBigDah.RowsRoots = append(tooBigDah.RowsRoots, bytes.Repeat([]byte{1}, 32)) + // make a mutant dah that has too few roots + var tooSmallDah DataAvailabilityHeader + tooSmallDah.ColumnRoots = [][]byte{bytes.Repeat([]byte{2}, 32)} + tooSmallDah.RowsRoots = [][]byte{bytes.Repeat([]byte{2}, 32)} + // use a bad hash + badHashDah := MinDataAvailabilityHeader() + badHashDah.hash = []byte{1, 2, 3, 4} + // dah with not equal number of roots + mismatchDah := MinDataAvailabilityHeader() + mismatchDah.ColumnRoots = append(mismatchDah.ColumnRoots, bytes.Repeat([]byte{2}, 32)) + + tests := []test{ + { + name: "min", + dah: MinDataAvailabilityHeader(), + }, + { + name: "max", + dah: bigdah, + }, + { + name: "too big dah", + dah: tooBigDah, + expectErr: true, + 
errStr: "maximum valid DataAvailabilityHeader has at most", + }, + { + name: "too small dah", + dah: tooSmallDah, + expectErr: true, + errStr: "minimum valid DataAvailabilityHeader has at least", + }, + { + name: "bash hash", + dah: badHashDah, + expectErr: true, + errStr: "wrong hash", + }, + { + name: "mismatched roots", + dah: mismatchDah, + expectErr: true, + errStr: "unequal number of row and column roots", + }, + } + + for _, tt := range tests { + tt := tt + err := tt.dah.ValidateBasic() + if tt.expectErr { + require.True(t, strings.Contains(err.Error(), tt.errStr), tt.name) + require.Error(t, err) + continue + } + require.NoError(t, err) + } +} + +func generateShares(count int, repeatByte byte) [][]byte { + shares := make([][]byte, count) + for i := 0; i < count; i++ { + shares[i] = bytes.Repeat([]byte{repeatByte}, consts.ShareSize) + } + return shares +} diff --git a/pkg/prove/proof.go b/pkg/prove/proof.go new file mode 100644 index 0000000000..7e358ed9a7 --- /dev/null +++ b/pkg/prove/proof.go @@ -0,0 +1,187 @@ +package prove + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/celestiaorg/rsmt2d" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/consts" + "github.com/tendermint/tendermint/pkg/wrapper" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +// TxInclusion uses the provided block data to progressively generate rows +// of a data square, and then using those shares to creates nmt inclusion proofs +// It is possible that a transaction spans more than one row. In that case, we +// have to return two proofs. 
+func TxInclusion(codec rsmt2d.Codec, data types.Data, origSquareSize, txIndex uint) (types.TxProof, error) { + // calculate the index of the shares that contain the tx + startPos, endPos, err := txSharePosition(data.Txs, txIndex) + if err != nil { + return types.TxProof{}, err + } + if (endPos - startPos) > 1 { + return types.TxProof{}, errors.New("transaction spanned more than two shares, this is not yet supported") + } + + // use the index of the shares and the square size to determine the row that + // contains the tx we need to prove + startRow := startPos / origSquareSize + endRow := endPos / origSquareSize + + rowShares, err := genRowShares(codec, data, origSquareSize, startRow, endRow) + if err != nil { + return types.TxProof{}, err + } + + var proofs []*tmproto.NMTProof //nolint:prealloc // rarely will this contain more than a single proof + var shares [][]byte //nolint:prealloc // rarely will this contain more than a single share + var rowRoots []tmbytes.HexBytes //nolint:prealloc // rarely will this contain more than a single root + for i, row := range rowShares { + // create an nmt to use to generate a proof + tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(origSquareSize)) + for j, share := range row { + tree.Push( + share, + rsmt2d.SquareIndex{ + Axis: uint(i), + Cell: uint(j), + }, + ) + } + + var pos uint + if i == 0 { + pos = startPos - (startRow * origSquareSize) + } else { + pos = endPos - (endRow * origSquareSize) + } + + shares = append(shares, row[pos]) + + proof, err := tree.Prove(int(pos)) + if err != nil { + return types.TxProof{}, err + } + + proofs = append(proofs, &tmproto.NMTProof{ + Start: int32(proof.Start()), + End: int32(proof.End()), + Nodes: proof.Nodes(), + LeafHash: proof.LeafHash(), + }) + + // we don't store the data availability header anywhere, so we + // regenerate the roots to each row + rowRoots = append(rowRoots, tree.Root()) + } + + return types.TxProof{ + RowRoots: rowRoots, + Data: shares, + Proofs: proofs, + 
}, nil +} + +// txSharePosition returns the share that a given transaction is included in. +// returns -1 if index is greater than that of the provided txs. +func txSharePosition(txs types.Txs, txIndex uint) (startSharePos, endSharePos uint, err error) { + if txIndex >= uint(len(txs)) { + return startSharePos, endSharePos, errors.New("transaction index is greater than the number of txs") + } + + totalLen := 0 + for i := uint(0); i < txIndex; i++ { + txLen := len(txs[i]) + totalLen += (delimLen(txLen) + txLen) + } + + txLen := len(txs[txIndex]) + + startSharePos = uint((totalLen) / consts.TxShareSize) + endSharePos = uint((totalLen + txLen + delimLen(txLen)) / consts.TxShareSize) + + return startSharePos, endSharePos, nil +} + +func delimLen(txLen int) int { + lenBuf := make([]byte, binary.MaxVarintLen64) + return binary.PutUvarint(lenBuf, uint64(txLen)) +} + +// genRowShares progessively generates data square rows from block data +func genRowShares(codec rsmt2d.Codec, data types.Data, origSquareSize, startRow, endRow uint) ([][][]byte, error) { + if endRow > origSquareSize { + return nil, errors.New("cannot generate row shares past the original square size") + } + origRowShares := splitIntoRows( + origSquareSize, + genOrigRowShares(data, origSquareSize, startRow, endRow), + ) + + encodedRowShares := make([][][]byte, len(origRowShares)) + for i, row := range origRowShares { + encRow, err := codec.Encode(row) + if err != nil { + panic(err) + } + encodedRowShares[i] = append( + append( + make([][]byte, 0, len(row)+len(encRow)), + row..., + ), encRow..., + ) + } + + return encodedRowShares, nil +} + +// genOrigRowShares progressively generates data square rows for the original +// data square, meaning the rows only half the full square length, as there is +// not erasure data +func genOrigRowShares(data types.Data, originalSquareSize, startRow, endRow uint) [][]byte { + wantLen := (endRow + 1) * originalSquareSize + startPos := startRow * originalSquareSize + + shares 
:= data.Txs.SplitIntoShares() + // return if we have enough shares + if uint(len(shares)) >= wantLen { + return shares[startPos:wantLen].RawShares() + } + + shares = append(shares, data.Evidence.SplitIntoShares()...) + if uint(len(shares)) >= wantLen { + return shares[startPos:wantLen].RawShares() + } + + for _, m := range data.Messages.MessagesList { + rawData, err := m.MarshalDelimited() + if err != nil { + panic(fmt.Sprintf("app accepted a Message that can not be encoded %#v", m)) + } + shares = types.AppendToShares(shares, m.NamespaceID, rawData) + + // return if we have enough shares + if uint(len(shares)) >= wantLen { + return shares[startPos:wantLen].RawShares() + } + } + + tailShares := types.TailPaddingShares(int(wantLen) - len(shares)) + shares = append(shares, tailShares...) + + return shares[startPos:wantLen].RawShares() +} + +// splitIntoRows splits shares into rows of a particular square size +func splitIntoRows(origSquareSize uint, shares [][]byte) [][][]byte { + rowCount := uint(len(shares)) / origSquareSize + rows := make([][][]byte, rowCount) + for i := uint(0); i < rowCount; i++ { + rows[i] = shares[i*origSquareSize : (i+1)*origSquareSize] + } + return rows +} diff --git a/pkg/prove/proof_test.go b/pkg/prove/proof_test.go new file mode 100644 index 0000000000..f3d9521382 --- /dev/null +++ b/pkg/prove/proof_test.go @@ -0,0 +1,237 @@ +package prove + +import ( + "bytes" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/consts" + "github.com/tendermint/tendermint/pkg/da" + "github.com/tendermint/tendermint/types" +) + +func TestTxInclusion(t *testing.T) { + txCount := 100 + typicalBlockData := types.Data{ + Txs: generateRandomlySizedContiguousShares(txCount, 200), + Messages: generateRandomlySizedMessages(10, 150), + } + + // compute the data availability header + shares, _, _ := typicalBlockData.ComputeShares(64) + 
+ squareSize := uint(math.Sqrt(float64(len(shares)))) + + for i := 0; i < txCount; i++ { + txProof, err := TxInclusion(consts.DefaultCodec(), typicalBlockData, squareSize, uint(i)) + require.NoError(t, err) + assert.True(t, txProof.VerifyProof()) + } +} + +func TestTxSharePosition(t *testing.T) { + type test struct { + name string + txs types.Txs + } + + tests := []test{ + { + name: "typical", + txs: generateRandomlySizedContiguousShares(44, 200), + }, + { + name: "many small tx", + txs: generateRandomlySizedContiguousShares(444, 100), + }, + { + name: "one small tx", + txs: generateRandomlySizedContiguousShares(1, 200), + }, + { + name: "one large tx", + txs: generateRandomlySizedContiguousShares(1, 2000), + }, + { + name: "many large txs", + txs: generateRandomlySizedContiguousShares(100, 2000), + }, + } + + type startEndPoints struct { + start, end uint + } + + for _, tt := range tests { + positions := make([]startEndPoints, len(tt.txs)) + for i := 0; i < len(tt.txs); i++ { + start, end, err := txSharePosition(tt.txs, uint(i)) + require.NoError(t, err) + positions[i] = startEndPoints{start: start, end: end} + } + + shares := tt.txs.SplitIntoShares().RawShares() + + for i, pos := range positions { + if pos.start == pos.end { + assert.Contains(t, string(shares[pos.start]), string(tt.txs[i]), tt.name, i, pos) + } else { + assert.Contains( + t, + joinByteSlices(shares[pos.start:pos.end+1]...), + string(tt.txs[i]), + tt.name, + pos, + len(tt.txs[i]), + ) + } + } + } +} + +func Test_genRowShares(t *testing.T) { + typicalBlockData := types.Data{ + Txs: generateRandomlySizedContiguousShares(120, 200), + Messages: generateRandomlySizedMessages(10, 1000), + } + + allShares, _, _ := typicalBlockData.ComputeShares(64) + rawShares := allShares.RawShares() + + originalSquareSize := uint(math.Sqrt(float64(len(rawShares)))) + + eds, err := da.ExtendShares(uint64(originalSquareSize), rawShares) + require.NoError(t, err) + + eds.ColRoots() + + rowShares, err := genRowShares( + 
consts.DefaultCodec(), + typicalBlockData, + originalSquareSize, + 0, + originalSquareSize-1, + ) + require.NoError(t, err) + + for i := uint(0); i < originalSquareSize; i++ { + row := eds.Row(i) + assert.Equal(t, row, rowShares[i], fmt.Sprintf("row %d", i)) + // also test fetching individual rows + secondSet, err := genRowShares(consts.DefaultCodec(), typicalBlockData, originalSquareSize, i, i) + require.NoError(t, err) + assert.Equal(t, row, secondSet[0], fmt.Sprintf("row %d", i)) + } +} + +func Test_genOrigRowShares(t *testing.T) { + txCount := 100 + typicalBlockData := types.Data{ + Txs: generateRandomlySizedContiguousShares(txCount, 200), + Messages: generateRandomlySizedMessages(10, 1500), + } + + allShares, _, _ := typicalBlockData.ComputeShares(8) + rawShares := allShares.RawShares() + + genShares := genOrigRowShares(typicalBlockData, 8, 0, 7) + + require.Equal(t, len(allShares), len(genShares)) + assert.Equal(t, rawShares, genShares) +} + +func joinByteSlices(s ...[]byte) string { + out := make([]string, len(s)) + for i, sl := range s { + sl, _, _ := types.ParseDelimiter(sl) + out[i] = string(sl[consts.NamespaceSize:]) + } + return strings.Join(out, "") +} + +func generateRandomlySizedContiguousShares(count, max int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + size := rand.Intn(max) + if size == 0 { + size = 1 + } + txs[i] = generateRandomContiguousShares(1, size)[0] + } + return txs +} + +func generateRandomContiguousShares(count, size int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + tx := make([]byte, size) + _, err := rand.Read(tx) + if err != nil { + panic(err) + } + txs[i] = tx + } + return txs +} + +func generateRandomlySizedMessages(count, maxMsgSize int) types.Messages { + msgs := make([]types.Message, count) + for i := 0; i < count; i++ { + msgs[i] = generateRandomMessage(rand.Intn(maxMsgSize)) + } + + // this is just to let us use assert.Equal + if count == 0 { + msgs = nil + } + + 
return types.Messages{MessagesList: msgs} +} + +func generateRandomMessage(size int) types.Message { + share := generateRandomNamespacedShares(1, size)[0] + msg := types.Message{ + NamespaceID: share.NamespaceID(), + Data: share.Data(), + } + return msg +} + +func generateRandomNamespacedShares(count, msgSize int) types.NamespacedShares { + shares := generateRandNamespacedRawData(uint32(count), consts.NamespaceSize, uint32(msgSize)) + msgs := make([]types.Message, count) + for i, s := range shares { + msgs[i] = types.Message{ + Data: s[consts.NamespaceSize:], + NamespaceID: s[:consts.NamespaceSize], + } + } + return types.Messages{MessagesList: msgs}.SplitIntoShares() +} + +func generateRandNamespacedRawData(total, nidSize, leafSize uint32) [][]byte { + data := make([][]byte, total) + for i := uint32(0); i < total; i++ { + nid := make([]byte, nidSize) + rand.Read(nid) + data[i] = nid + } + sortByteArrays(data) + for i := uint32(0); i < total; i++ { + d := make([]byte, leafSize) + rand.Read(d) + data[i] = append(data[i], d...) + } + + return data +} + +func sortByteArrays(src [][]byte) { + sort.Slice(src, func(i, j int) bool { return bytes.Compare(src[i], src[j]) < 0 }) +} diff --git a/pkg/wrapper/nmt_wrapper.go b/pkg/wrapper/nmt_wrapper.go new file mode 100644 index 0000000000..e9b3747d5b --- /dev/null +++ b/pkg/wrapper/nmt_wrapper.go @@ -0,0 +1,83 @@ +package wrapper + +import ( + "fmt" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + + "github.com/tendermint/tendermint/pkg/consts" +) + +// Fulfills the rsmt2d.Tree interface and rsmt2d.TreeConstructorFn function +var _ rsmt2d.TreeConstructorFn = ErasuredNamespacedMerkleTree{}.Constructor +var _ rsmt2d.Tree = &ErasuredNamespacedMerkleTree{} + +// ErasuredNamespacedMerkleTree wraps NamespaceMerkleTree to conform to the +// rsmt2d.Tree interface while also providing the correct namespaces to the +// underlying NamespaceMerkleTree. 
It does this by adding the already included +// namespace to the first half of the tree, and then uses the parity namespace +// ID for each share pushed to the second half of the tree. This allows for the +// namespaces to be included in the erasure data, while also keeping the nmt +// library sufficiently general +type ErasuredNamespacedMerkleTree struct { + squareSize uint64 // note: this refers to the width of the original square before erasure-coded + options []nmt.Option + tree *nmt.NamespacedMerkleTree +} + +// NewErasuredNamespacedMerkleTree issues a new ErasuredNamespacedMerkleTree. squareSize must be greater than zero +func NewErasuredNamespacedMerkleTree(origSquareSize uint64, setters ...nmt.Option) ErasuredNamespacedMerkleTree { + if origSquareSize == 0 { + panic("cannot create a ErasuredNamespacedMerkleTree of squareSize == 0") + } + tree := nmt.New(consts.NewBaseHashFunc(), setters...) + return ErasuredNamespacedMerkleTree{squareSize: origSquareSize, options: setters, tree: tree} +} + +// Constructor acts as the rsmt2d.TreeConstructorFn for +// ErasuredNamespacedMerkleTree +func (w ErasuredNamespacedMerkleTree) Constructor() rsmt2d.Tree { + newTree := NewErasuredNamespacedMerkleTree(w.squareSize, w.options...) + return &newTree +} + +// Push adds the provided data to the underlying NamespaceMerkleTree, and +// automatically uses the first DefaultNamespaceIDLen number of bytes as the +// namespace unless the data pushed to the second half of the tree. Fulfills the +// rsmt.Tree interface. NOTE: panics if an error is encountered while pushing or +// if the tree size is exceeded. 
+func (w *ErasuredNamespacedMerkleTree) Push(data []byte, idx rsmt2d.SquareIndex) { + if idx.Axis+1 > 2*uint(w.squareSize) || idx.Cell+1 > 2*uint(w.squareSize) { + panic(fmt.Sprintf("pushed past predetermined square size: boundary at %d index at %+v", 2*w.squareSize, idx)) + } + nidAndData := make([]byte, consts.NamespaceSize+len(data)) + copy(nidAndData[consts.NamespaceSize:], data) + // use the parity namespace if the cell is not in Q0 of the extended data square + if idx.Axis+1 > uint(w.squareSize) || idx.Cell+1 > uint(w.squareSize) { + copy(nidAndData[:consts.NamespaceSize], consts.ParitySharesNamespaceID) + } else { + copy(nidAndData[:consts.NamespaceSize], data[:consts.NamespaceSize]) + } + // push to the underlying tree + err := w.tree.Push(nidAndData) + // panic on error + if err != nil { + panic(err) + } +} + +// Root fulfills the rsmt.Tree interface by generating and returning the +// underlying NamespaceMerkleTree Root. +func (w *ErasuredNamespacedMerkleTree) Root() []byte { + return w.tree.Root() +} + +func (w *ErasuredNamespacedMerkleTree) Prove(ind int) (nmt.Proof, error) { + return w.tree.Prove(ind) +} + +// Tree returns the underlying NamespacedMerkleTree +func (w *ErasuredNamespacedMerkleTree) Tree() *nmt.NamespacedMerkleTree { + return w.tree +} diff --git a/pkg/wrapper/nmt_wrapper_test.go b/pkg/wrapper/nmt_wrapper_test.go new file mode 100644 index 0000000000..b43aeda402 --- /dev/null +++ b/pkg/wrapper/nmt_wrapper_test.go @@ -0,0 +1,168 @@ +package wrapper + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "sort" + "testing" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/pkg/consts" +) + +func TestPushErasuredNamespacedMerkleTree(t *testing.T) { + testCases := []struct { + name string + squareSize int + }{ + {"extendedSquareSize = 16", 8}, + {"extendedSquareSize = 256", 128}, + } + for _, tc := range testCases { + tc := tc + n := 
NewErasuredNamespacedMerkleTree(uint64(tc.squareSize)) + tree := n.Constructor() + + // push test data to the tree + for i, d := range generateErasuredData(t, tc.squareSize, consts.DefaultCodec()) { + // push will panic if there's an error + tree.Push(d, rsmt2d.SquareIndex{Axis: uint(0), Cell: uint(i)}) + } + } +} + +func TestRootErasuredNamespacedMerkleTree(t *testing.T) { + // check that the root is different from a standard nmt tree this should be + // the case, because the ErasuredNamespacedMerkleTree should add namespaces + // to the second half of the tree + size := 8 + data := generateRandNamespacedRawData(size, consts.NamespaceSize, consts.MsgShareSize) + n := NewErasuredNamespacedMerkleTree(uint64(size)) + tree := n.Constructor() + nmtTree := nmt.New(sha256.New()) + + for i, d := range data { + tree.Push(d, rsmt2d.SquareIndex{Axis: uint(0), Cell: uint(i)}) + err := nmtTree.Push(d) + if err != nil { + t.Error(err) + } + } + + assert.NotEqual(t, nmtTree.Root(), tree.Root()) +} + +func TestErasureNamespacedMerkleTreePanics(t *testing.T) { + testCases := []struct { + name string + pFunc assert.PanicTestFunc + }{ + { + "push over square size", + assert.PanicTestFunc( + func() { + data := generateErasuredData(t, 16, consts.DefaultCodec()) + n := NewErasuredNamespacedMerkleTree(uint64(15)) + tree := n.Constructor() + for i, d := range data { + tree.Push(d, rsmt2d.SquareIndex{Axis: uint(0), Cell: uint(i)}) + } + }), + }, + { + "push in incorrect lexigraphic order", + assert.PanicTestFunc( + func() { + data := generateErasuredData(t, 16, consts.DefaultCodec()) + n := NewErasuredNamespacedMerkleTree(uint64(16)) + tree := n.Constructor() + for i := len(data) - 1; i > 0; i-- { + tree.Push(data[i], rsmt2d.SquareIndex{Axis: uint(0), Cell: uint(i)}) + } + }, + ), + }, + } + for _, tc := range testCases { + tc := tc + assert.Panics(t, tc.pFunc, tc.name) + + } +} + +func TestExtendedDataSquare(t *testing.T) { + squareSize := 4 + // data for a 4X4 square + raw := 
generateRandNamespacedRawData( + squareSize*squareSize, + consts.NamespaceSize, + consts.MsgShareSize, + ) + + tree := NewErasuredNamespacedMerkleTree(uint64(squareSize)) + + _, err := rsmt2d.ComputeExtendedDataSquare(raw, consts.DefaultCodec(), tree.Constructor) + assert.NoError(t, err) +} + +func TestErasuredNamespacedMerkleTree(t *testing.T) { + // check that the Tree() returns exact underlying nmt tree + size := 8 + data := generateRandNamespacedRawData(size, consts.NamespaceSize, consts.MsgShareSize) + n := NewErasuredNamespacedMerkleTree(uint64(size)) + tree := n.Constructor() + + for i, d := range data { + tree.Push(d, rsmt2d.SquareIndex{Axis: uint(0), Cell: uint(i)}) + } + + assert.Equal(t, n.Tree(), n.tree) + assert.Equal(t, n.Tree().Root(), n.tree.Root()) +} + +// generateErasuredData produces a slice that is twice as long as it erasures +// the data +func generateErasuredData(t *testing.T, numLeaves int, codec rsmt2d.Codec) [][]byte { + raw := generateRandNamespacedRawData( + numLeaves, + consts.NamespaceSize, + consts.MsgShareSize, + ) + erasuredData, err := codec.Encode(raw) + if err != nil { + t.Error(err) + } + return append(raw, erasuredData...) +} + +// this code is copy pasted from the plugin, and should likely be exported in the plugin instead +func generateRandNamespacedRawData(total int, nidSize int, leafSize int) [][]byte { + data := make([][]byte, total) + for i := 0; i < total; i++ { + nid := make([]byte, nidSize) + _, err := rand.Read(nid) + if err != nil { + panic(err) + } + data[i] = nid + } + + sortByteArrays(data) + for i := 0; i < total; i++ { + d := make([]byte, leafSize) + _, err := rand.Read(d) + if err != nil { + panic(err) + } + data[i] = append(data[i], d...) 
+ } + + return data +} + +func sortByteArrays(src [][]byte) { + sort.Slice(src, func(i, j int) bool { return bytes.Compare(src[i], src[j]) < 0 }) +} diff --git a/proto/gogoproto/gogo.pb.go b/proto/gogoproto/gogo.pb.go new file mode 100644 index 0000000000..9a76ba844c --- /dev/null +++ b/proto/gogoproto/gogo.pb.go @@ -0,0 +1,888 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogoproto/gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogoproto/gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogoproto/gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: 
(*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogoproto/gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogoproto/gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogoproto/gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogoproto/gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: 
"varint,63005,opt,name=face_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogoproto/gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogoproto/gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogoproto/gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogoproto/gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogoproto/gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogoproto/gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogoproto/gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: 
(*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogoproto/gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogoproto/gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogoproto/gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogoproto/gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogoproto/gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogoproto/gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: 
"varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogoproto/gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogoproto/gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogoproto/gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogoproto/gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: 
"gogoproto/gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogoproto/gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogoproto/gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ 
+ ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogoproto/gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogoproto/gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogoproto/gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogoproto/gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogoproto/gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogoproto/gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogoproto/gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogoproto/gogo.proto", +} + +var E_Testgen = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogoproto/gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogoproto/gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogoproto/gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogoproto/gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogoproto/gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogoproto/gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogoproto/gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: 
"varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogoproto/gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogoproto/gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogoproto/gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogoproto/gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogoproto/gogo.proto", +} + +var E_GoprotoUnkeyed = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogoproto/gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogoproto/gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogoproto/gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogoproto/gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogoproto/gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogoproto/gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogoproto/gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogoproto/gogo.proto", 
+} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogoproto/gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogoproto/gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogoproto/gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogoproto/gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogoproto/gogo.proto", +} + +var E_Castrepeated = &proto.ExtensionDesc{ + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65013, + Name: "gogoproto.castrepeated", + Tag: "bytes,65013,opt,name=castrepeated", + Filename: "gogoproto/gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + 
proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + 
proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) + proto.RegisterExtension(E_Castrepeated) +} + +func init() { proto.RegisterFile("gogoproto/gogo.proto", fileDescriptor_c586470e9b64aee7) } + +var fileDescriptor_c586470e9b64aee7 = []byte{ + // 1382 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6c, 0x1c, 0x45, + 0x17, 0x80, 0x63, 0xfd, 0x89, 0x62, 0x97, 0xed, 0x38, 0x5e, 0xfe, 0x10, 0x22, 0x30, 0x81, 0x13, + 0x27, 0xe7, 0x14, 0xa1, 0x94, 0x15, 0x45, 0x8e, 0xe5, 0x58, 0x41, 0x24, 0x18, 0x27, 0x0e, 0x9b, + 0xd0, 0xa8, 0x67, 0xa6, 0xdc, 0x6e, 0xd2, 0xdd, 0xd5, 0x74, 0x57, 0x87, 0x38, 0x37, 0x14, 0x16, + 0x21, 0x04, 0x84, 0x45, 0x82, 0x84, 0x24, 0x10, 0x10, 0xfb, 0x1a, 0xf6, 0xe5, 0xc2, 0x05, 0xc8, + 0x31, 0xdc, 0x38, 0xa2, 0x98, 0x0b, 0x60, 0x76, 0x73, 0xf2, 0x05, 0xbd, 0xee, 0xf7, 0x7a, 0xaa, + 0xdb, 0x23, 0x55, 0xcd, 0x6d, 0x3c, 0xae, 0xef, 0x73, 0xf5, 0x7b, 0x55, 0xef, 0x3d, 0x37, 0x1b, + 0x71, 0xa5, 0x2b, 0xa3, 0x58, 0x2a, 0xb9, 0x03, 0x3e, 0x8d, 0x65, 0x1f, 0x87, 0x7a, 0x8a, 0x6f, + 0xb7, 0x6d, 0x77, 0xa5, 0x74, 0x7d, 0xb1, 0x23, 0xfb, 0xa9, 0x9e, 0xce, 0xef, 0x68, 0x8a, 0xa4, + 0x11, 0x7b, 0x91, 0x92, 0x71, 0xbe, 0x98, 0x1f, 0x64, 0xc3, 0xb8, 0xb8, 
0x26, 0xc2, 0x34, 0xa8, + 0x45, 0xb1, 0x98, 0xf7, 0x8e, 0x0f, 0x5d, 0x33, 0x96, 0x93, 0x63, 0x44, 0x8e, 0x4d, 0x85, 0x69, + 0x70, 0x6b, 0xa4, 0x3c, 0x19, 0x26, 0x5b, 0x2f, 0xfe, 0xf4, 0xbf, 0xed, 0x5d, 0x37, 0x76, 0xcf, + 0x0e, 0x22, 0x0a, 0xbf, 0x9b, 0xc9, 0x40, 0x3e, 0xcb, 0xfe, 0x5f, 0xf2, 0x25, 0x2a, 0xf6, 0x42, + 0x57, 0xc4, 0x06, 0xe3, 0x37, 0x68, 0x1c, 0xd6, 0x8c, 0x87, 0x10, 0xe5, 0x93, 0xac, 0xbf, 0x13, + 0xd7, 0xb7, 0xe8, 0xea, 0x13, 0xba, 0x64, 0x9a, 0x0d, 0x64, 0x92, 0x46, 0x9a, 0x28, 0x19, 0x84, + 0x4e, 0x20, 0x0c, 0x9a, 0xef, 0x32, 0x4d, 0xcf, 0xec, 0x26, 0xc0, 0x26, 0x0b, 0x8a, 0x73, 0xd6, + 0x0d, 0xdf, 0x34, 0x45, 0xc3, 0x37, 0x18, 0x2e, 0xe1, 0x46, 0x8a, 0xf5, 0xfc, 0x08, 0x1b, 0x81, + 0xcf, 0xc7, 0x1c, 0x3f, 0x15, 0xfa, 0x4e, 0xae, 0x6f, 0xeb, 0x39, 0x02, 0xcb, 0x48, 0xf6, 0xfd, + 0xc9, 0xf5, 0xd9, 0x76, 0x86, 0x0b, 0x81, 0xb6, 0x27, 0x2d, 0x8b, 0xae, 0x50, 0x4a, 0xc4, 0x49, + 0xcd, 0xf1, 0xdb, 0x6d, 0x6f, 0x9f, 0xe7, 0x17, 0xc6, 0xd3, 0xcb, 0xe5, 0x2c, 0x4e, 0xe7, 0xe4, + 0x84, 0xef, 0xf3, 0x39, 0x76, 0x55, 0x9b, 0x53, 0x61, 0xe1, 0x3c, 0x83, 0xce, 0x91, 0x35, 0x27, + 0x03, 0xb4, 0x33, 0x8c, 0xbe, 0x2f, 0x72, 0x69, 0xe1, 0x7c, 0x01, 0x9d, 0x43, 0xc8, 0x52, 0x4a, + 0xc1, 0x78, 0x33, 0x1b, 0x3c, 0x26, 0xe2, 0xba, 0x4c, 0x44, 0x4d, 0xdc, 0x97, 0x3a, 0xbe, 0x85, + 0xee, 0x2c, 0xea, 0x06, 0x10, 0x9c, 0x02, 0x0e, 0x5c, 0xbb, 0x58, 0xf7, 0xbc, 0xd3, 0x10, 0x16, + 0x8a, 0x73, 0xa8, 0xd8, 0x08, 0xeb, 0x01, 0x9d, 0x60, 0x7d, 0xae, 0xcc, 0x1f, 0xc9, 0x02, 0x3f, + 0x8f, 0x78, 0x2f, 0x31, 0xa8, 0x88, 0x64, 0x94, 0xfa, 0x8e, 0xb2, 0xd9, 0xc1, 0x8b, 0xa4, 0x20, + 0x06, 0x15, 0x1d, 0x84, 0xf5, 0x25, 0x52, 0x24, 0x5a, 0x3c, 0xf7, 0xb0, 0x5e, 0x19, 0xfa, 0x8b, + 0x32, 0xb4, 0xd9, 0xc4, 0x05, 0x34, 0x30, 0x44, 0x40, 0x30, 0xce, 0x7a, 0x6c, 0x13, 0xf1, 0xea, + 0x32, 0x5d, 0x0f, 0xca, 0xc0, 0x34, 0x1b, 0xa0, 0x02, 0xe5, 0xc9, 0xd0, 0x42, 0xf1, 0x1a, 0x2a, + 0x36, 0x69, 0x18, 0x3e, 0x86, 0x12, 0x89, 0x72, 0x85, 0x8d, 0xe4, 0x75, 0x7a, 0x0c, 0x44, 0x30, + 0x94, 0x75, 
0x11, 0x36, 0x16, 0xec, 0x0c, 0x6f, 0x50, 0x28, 0x89, 0x01, 0xc5, 0x24, 0xeb, 0x0f, + 0x9c, 0x38, 0x59, 0x70, 0x7c, 0xab, 0x74, 0xbc, 0x89, 0x8e, 0xbe, 0x02, 0xc2, 0x88, 0xa4, 0x61, + 0x27, 0x9a, 0xb7, 0x28, 0x22, 0x1a, 0x86, 0x57, 0x2f, 0x51, 0x4e, 0xdd, 0x17, 0xb5, 0x4e, 0x6c, + 0x6f, 0xd3, 0xd5, 0xcb, 0xd9, 0x03, 0xba, 0x71, 0x9c, 0xf5, 0x24, 0xde, 0x09, 0x2b, 0xcd, 0x3b, + 0x94, 0xe9, 0x0c, 0x00, 0xf8, 0x4e, 0x76, 0x75, 0xdb, 0x36, 0x61, 0x21, 0x7b, 0x17, 0x65, 0x5b, + 0xda, 0xb4, 0x0a, 0x2c, 0x09, 0x9d, 0x2a, 0xdf, 0xa3, 0x92, 0x20, 0x2a, 0xae, 0x19, 0x36, 0x92, + 0x86, 0x89, 0x33, 0xdf, 0x59, 0xd4, 0xde, 0xa7, 0xa8, 0xe5, 0x6c, 0x29, 0x6a, 0x87, 0xd9, 0x16, + 0x34, 0x76, 0x96, 0xd7, 0x0f, 0xa8, 0xb0, 0xe6, 0xf4, 0x5c, 0x39, 0xbb, 0x77, 0xb3, 0x6d, 0x45, + 0x38, 0x8f, 0x2b, 0x11, 0x26, 0xc0, 0xd4, 0x02, 0x27, 0xb2, 0x30, 0x5f, 0x44, 0x33, 0x55, 0xfc, + 0xa9, 0x42, 0x70, 0xc0, 0x89, 0x40, 0x7e, 0x07, 0xdb, 0x4a, 0xf2, 0x34, 0x8c, 0x45, 0x43, 0xba, + 0xa1, 0x77, 0x42, 0x34, 0x2d, 0xd4, 0x1f, 0x56, 0x52, 0x35, 0xa7, 0xe1, 0x60, 0xde, 0xcf, 0x36, + 0x17, 0xb3, 0x4a, 0xcd, 0x0b, 0x22, 0x19, 0x2b, 0x83, 0xf1, 0x23, 0xca, 0x54, 0xc1, 0xed, 0xcf, + 0x30, 0x3e, 0xc5, 0x36, 0x65, 0x3f, 0xda, 0x1e, 0xc9, 0x8f, 0x51, 0xd4, 0xdf, 0xa2, 0xb0, 0x70, + 0x34, 0x64, 0x10, 0x39, 0xb1, 0x4d, 0xfd, 0xfb, 0x84, 0x0a, 0x07, 0x22, 0x58, 0x38, 0xd4, 0x62, + 0x24, 0xa0, 0xdb, 0x5b, 0x18, 0x3e, 0xa5, 0xc2, 0x41, 0x0c, 0x2a, 0x68, 0x60, 0xb0, 0x50, 0x7c, + 0x46, 0x0a, 0x62, 0x40, 0x71, 0x5b, 0xab, 0xd1, 0xc6, 0xc2, 0xf5, 0x12, 0x15, 0x3b, 0xb0, 0xda, + 0xa0, 0xfa, 0x7c, 0xb9, 0x3c, 0x84, 0xcd, 0x6a, 0x28, 0x54, 0xa2, 0x40, 0x24, 0x89, 0xe3, 0x0a, + 0x98, 0x38, 0x2c, 0x36, 0xf6, 0x05, 0x55, 0x22, 0x0d, 0x83, 0xbd, 0x69, 0x13, 0x22, 0x84, 0xbd, + 0xe1, 0x34, 0x16, 0x6c, 0x74, 0x5f, 0x56, 0x36, 0x77, 0x88, 0x58, 0x70, 0x6a, 0xf3, 0x4f, 0x1a, + 0x1e, 0x15, 0x8b, 0x56, 0xa7, 0xf3, 0xab, 0xca, 0xfc, 0x33, 0x97, 0x93, 0x79, 0x0d, 0x19, 0xa8, + 0xcc, 0x53, 0x43, 0xd7, 0xad, 0x71, 0x1d, 0xc8, 
0x9f, 0x8b, 0x74, 0x0f, 0xac, 0xe0, 0xf3, 0x96, + 0xc7, 0x29, 0x7e, 0x0b, 0x1c, 0xf2, 0xf2, 0xd0, 0x63, 0x96, 0x9d, 0x5c, 0x29, 0xce, 0x79, 0x69, + 0xe6, 0xe1, 0xfb, 0x58, 0x7f, 0x69, 0xe0, 0x31, 0xab, 0x1e, 0x44, 0x55, 0x9f, 0x3e, 0xef, 0xf0, + 0x9d, 0x6c, 0x3d, 0x0c, 0x2f, 0x66, 0xfc, 0x21, 0xc4, 0xb3, 0xe5, 0x7c, 0x37, 0xeb, 0xa6, 0xa1, + 0xc5, 0x8c, 0x3e, 0x8c, 0x68, 0x81, 0x00, 0x4e, 0x03, 0x8b, 0x19, 0x7f, 0x84, 0x70, 0x42, 0x00, + 0xb7, 0x0f, 0xe1, 0xd7, 0x8f, 0xad, 0xc7, 0xa6, 0x43, 0xb1, 0x1b, 0x67, 0x1b, 0x71, 0x52, 0x31, + 0xd3, 0x8f, 0xe2, 0x1f, 0x27, 0x82, 0xdf, 0xc4, 0x36, 0x58, 0x06, 0xfc, 0x71, 0x44, 0xf3, 0xf5, + 0x7c, 0x92, 0xf5, 0x6a, 0xd3, 0x89, 0x19, 0x7f, 0x02, 0x71, 0x9d, 0x82, 0xad, 0xe3, 0x74, 0x62, + 0x16, 0x3c, 0x49, 0x5b, 0x47, 0x02, 0xc2, 0x46, 0x83, 0x89, 0x99, 0x3e, 0x45, 0x51, 0x27, 0x84, + 0xef, 0x61, 0x3d, 0x45, 0xb3, 0x31, 0xf3, 0x4f, 0x21, 0xdf, 0x62, 0x20, 0x02, 0x5a, 0xb3, 0x33, + 0x2b, 0x9e, 0xa6, 0x08, 0x68, 0x14, 0x5c, 0xa3, 0xea, 0x00, 0x63, 0x36, 0x3d, 0x43, 0xd7, 0xa8, + 0x32, 0xbf, 0x40, 0x36, 0xb3, 0x9a, 0x6f, 0x56, 0x3c, 0x4b, 0xd9, 0xcc, 0xd6, 0xc3, 0x36, 0xaa, + 0x13, 0x81, 0xd9, 0xf1, 0x1c, 0x6d, 0xa3, 0x32, 0x10, 0xf0, 0x19, 0x36, 0xb4, 0x76, 0x1a, 0x30, + 0xfb, 0x9e, 0x47, 0xdf, 0xe0, 0x9a, 0x61, 0x80, 0xdf, 0xce, 0xb6, 0xb4, 0x9f, 0x04, 0xcc, 0xd6, + 0xd3, 0x2b, 0x95, 0xff, 0xdd, 0xf4, 0x41, 0x80, 0x1f, 0x6e, 0xb5, 0x14, 0x7d, 0x0a, 0x30, 0x6b, + 0xcf, 0xac, 0x94, 0x0b, 0xb7, 0x3e, 0x04, 0xf0, 0x09, 0xc6, 0x5a, 0x0d, 0xd8, 0xec, 0x3a, 0x8b, + 0x2e, 0x0d, 0x82, 0xab, 0x81, 0xfd, 0xd7, 0xcc, 0x9f, 0xa3, 0xab, 0x81, 0x04, 0x5c, 0x0d, 0x6a, + 0xbd, 0x66, 0xfa, 0x3c, 0x5d, 0x0d, 0x42, 0xe0, 0x64, 0x6b, 0xdd, 0xcd, 0x6c, 0xb8, 0x40, 0x27, + 0x5b, 0xa3, 0xf8, 0x41, 0x36, 0xb8, 0xa6, 0x21, 0x9a, 0x55, 0x2f, 0xa3, 0x6a, 0x73, 0xb5, 0x1f, + 0xea, 0xcd, 0x0b, 0x9b, 0xa1, 0xd9, 0xf6, 0x4a, 0xa5, 0x79, 0x61, 0x2f, 0xe4, 0xe3, 0xac, 0x3b, + 0x4c, 0x7d, 0x1f, 0x2e, 0xcf, 0xd0, 0xb5, 0x6d, 0xba, 0xa9, 0xf0, 0x9b, 0xa4, 0xf8, 
0x79, 0x15, + 0xa3, 0x43, 0x00, 0xdf, 0xc9, 0x36, 0x88, 0xa0, 0x2e, 0x9a, 0x26, 0xf2, 0x97, 0x55, 0x2a, 0x98, + 0xb0, 0x9a, 0xef, 0x61, 0x2c, 0x7f, 0x35, 0x02, 0x61, 0x36, 0xb1, 0xbf, 0xae, 0xe6, 0x6f, 0x69, + 0x34, 0xa4, 0x25, 0xc8, 0x92, 0x62, 0x10, 0x2c, 0x97, 0x05, 0x59, 0x46, 0x76, 0xb1, 0x8d, 0xf7, + 0x26, 0x32, 0x54, 0x8e, 0x6b, 0xa2, 0x7f, 0x43, 0x9a, 0xd6, 0x43, 0xc0, 0x02, 0x19, 0x0b, 0xe5, + 0xb8, 0x89, 0x89, 0xfd, 0x1d, 0xd9, 0x02, 0x00, 0xb8, 0xe1, 0x24, 0xca, 0xe6, 0xb9, 0xff, 0x20, + 0x98, 0x00, 0xd8, 0x34, 0x7c, 0x3e, 0x2a, 0x16, 0x4d, 0xec, 0x9f, 0xb4, 0x69, 0x5c, 0xcf, 0x77, + 0xb3, 0x1e, 0xf8, 0x98, 0xbd, 0x55, 0x32, 0xc1, 0x7f, 0x21, 0xdc, 0x22, 0xe0, 0x2f, 0x27, 0xaa, + 0xa9, 0x3c, 0x73, 0xb0, 0xff, 0xc6, 0x4c, 0xd3, 0x7a, 0x3e, 0xc1, 0x7a, 0x13, 0xd5, 0x6c, 0xa6, + 0x38, 0x9f, 0x1a, 0xf0, 0x7f, 0x56, 0x8b, 0x57, 0x16, 0x05, 0x03, 0xd9, 0xbe, 0xff, 0xa8, 0x8a, + 0xa4, 0x17, 0x2a, 0x11, 0x9b, 0x0c, 0x2b, 0x68, 0xd0, 0x10, 0x3e, 0xc9, 0xfa, 0xe0, 0x59, 0x62, + 0x11, 0x09, 0x47, 0x99, 0x4f, 0xeb, 0xbf, 0x18, 0x80, 0x12, 0xb4, 0xf7, 0x9e, 0x4b, 0x57, 0x46, + 0xbb, 0x2e, 0x5f, 0x19, 0xed, 0xfa, 0xf1, 0xca, 0x68, 0xd7, 0xa9, 0xa5, 0xd1, 0x75, 0x97, 0x97, + 0x46, 0xd7, 0xfd, 0xb0, 0x34, 0xba, 0x8e, 0x0d, 0x37, 0x64, 0x50, 0x35, 0xee, 0x65, 0xd3, 0x72, + 0x5a, 0xce, 0x64, 0x45, 0xec, 0xae, 0x1b, 0x5c, 0x4f, 0x2d, 0xa4, 0xf5, 0xb1, 0x86, 0x0c, 0xb2, + 0xd7, 0xb8, 0xad, 0xb7, 0xb5, 0xc5, 0x3f, 0x39, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xbe, + 0x0f, 0x06, 0xea, 0x15, 0x00, 0x00, +} diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 7126488d0a..bcb75a000f 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -35,6 +35,8 @@ message Request { RequestOfferSnapshot offer_snapshot = 12; RequestLoadSnapshotChunk load_snapshot_chunk = 13; RequestApplySnapshotChunk apply_snapshot_chunk = 14; + RequestPrepareProposal prepare_proposal = 15; + RequestProcessProposal process_proposal = 16; 
} } @@ -117,6 +119,20 @@ message RequestApplySnapshotChunk { string sender = 3; } +message RequestPrepareProposal { + // block_data is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + // applications can not exceed the size of the data passed to it. + tendermint.types.Data block_data = 1; + // If an application decides to populate block_data with extra information, they can not exceed this value. + int64 block_data_size = 2; +} + +message RequestProcessProposal { + tendermint.types.Header header = 1 [(gogoproto.nullable) = false]; + repeated bytes txs = 2; +} + //---------------------------------------- // Response types @@ -137,6 +153,8 @@ message Response { ResponseOfferSnapshot offer_snapshot = 13; ResponseLoadSnapshotChunk load_snapshot_chunk = 14; ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + ResponsePrepareProposal prepare_proposal = 16; + ResponseProcessProposal process_proposal = 17; } } @@ -262,6 +280,21 @@ message ResponseApplySnapshotChunk { } } +message ResponsePrepareProposal { + tendermint.types.Data block_data = 1; +} + +message ResponseProcessProposal { + Result result = 1; + repeated bytes evidence = 2; + + enum Result { + UNKNOWN = 0; // Unknown result, invalidate + ACCEPT = 1; // proposal verified, vote on the proposal + REJECT = 2; // proposal invalidated + } +} + //---------------------------------------- // Misc. @@ -289,10 +322,11 @@ message EventAttribute { // // One usage is indexing transaction results. 
message TxResult { - int64 height = 1; - uint32 index = 2; - bytes tx = 3; - ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; + int64 height = 1; + uint32 index = 2; + bytes tx = 3; + ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; + bytes original_hash = 5; } //---------------------------------------- @@ -366,4 +400,6 @@ service ABCIApplication { rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); + rpc PrepareProposal(RequestPrepareProposal) returns (ResponsePrepareProposal); + rpc ProcessProposal(RequestProcessProposal) returns (ResponseProcessProposal); } diff --git a/proto/tendermint/blocksync/message_test.go b/proto/tendermint/blocksync/message_test.go index 3406c6dff4..0ea7e43a09 100644 --- a/proto/tendermint/blocksync/message_test.go +++ b/proto/tendermint/blocksync/message_test.go @@ -85,7 +85,7 @@ func TestStatusResponse_Validate(t *testing.T) { } func TestBlockchainMessageVectors(t *testing.T) { - block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil) + block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil, nil) block.Version.Block = 11 // overwrite updated protocol version bpb, err := block.ToProto() @@ -102,7 +102,7 @@ func TestBlockchainMessageVectors(t *testing.T) { BlockRequest: &bcproto.BlockRequest{Height: math.MaxInt64}}}, "0a0a08ffffffffffffffff7f"}, {"BlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_BlockResponse{ - BlockResponse: &bcproto.BlockResponse{Block: bpb}}}, "1a700a6e0a5b0a02080b1803220b088092b8c398feffffff012a0212003a20c4da88e876062aa1543400d50d0eaa0dac88096057949cfb7bca7f3a48c04bf96a20e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855120d0a0b48656c6c6f20576f726c641a00"}, + BlockResponse: &bcproto.BlockResponse{Block: bpb}}}, 
"1a97010a94010a5b0a02080b1803220b088092b8c398feffffff012a0212003a20269ece38583f42aaf53fdd3abe1f570ab9b0d08284d080900966040a29df504c6a20e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b85512350a0b48656c6c6f20576f726c641a00220028013220269ece38583f42aaf53fdd3abe1f570ab9b0d08284d080900966040a29df504c"}, {"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ NoBlockResponse: &bcproto.NoBlockResponse{Height: 1}}}, "12020801"}, {"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ diff --git a/proto/tendermint/da/data_availability_header.pb.go b/proto/tendermint/da/data_availability_header.pb.go new file mode 100644 index 0000000000..875dbbc734 --- /dev/null +++ b/proto/tendermint/da/data_availability_header.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/da/data_availability_header.proto + +package da + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DataAvailabilityHeader contains the row and column roots of the erasure +// coded version of the data in Block.Data. +// Therefor the original Block.Data is arranged in a +// k × k matrix, which is then "extended" to a +// 2k × 2k matrix applying multiple times Reed-Solomon encoding. 
+// For details see Section 5.2: https://arxiv.org/abs/1809.09044 +// or the Celestia specification: +// https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#availabledataheader +// Note that currently we list row and column roots in separate fields +// (different from the spec). +type DataAvailabilityHeader struct { + // RowRoot_j = root((M_{j,1} || M_{j,2} || ... || M_{j,2k} )) + RowRoots [][]byte `protobuf:"bytes,1,rep,name=row_roots,json=rowRoots,proto3" json:"row_roots,omitempty"` + // ColumnRoot_j = root((M_{1,j} || M_{2,j} || ... || M_{2k,j} )) + ColumnRoots [][]byte `protobuf:"bytes,2,rep,name=column_roots,json=columnRoots,proto3" json:"column_roots,omitempty"` +} + +func (m *DataAvailabilityHeader) Reset() { *m = DataAvailabilityHeader{} } +func (m *DataAvailabilityHeader) String() string { return proto.CompactTextString(m) } +func (*DataAvailabilityHeader) ProtoMessage() {} +func (*DataAvailabilityHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_dfe248ce3a86eed9, []int{0} +} +func (m *DataAvailabilityHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataAvailabilityHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataAvailabilityHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataAvailabilityHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataAvailabilityHeader.Merge(m, src) +} +func (m *DataAvailabilityHeader) XXX_Size() int { + return m.Size() +} +func (m *DataAvailabilityHeader) XXX_DiscardUnknown() { + xxx_messageInfo_DataAvailabilityHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_DataAvailabilityHeader proto.InternalMessageInfo + +func (m *DataAvailabilityHeader) GetRowRoots() [][]byte { + if m != nil { + return m.RowRoots + } + return nil +} + +func (m 
*DataAvailabilityHeader) GetColumnRoots() [][]byte { + if m != nil { + return m.ColumnRoots + } + return nil +} + +func init() { + proto.RegisterType((*DataAvailabilityHeader)(nil), "tendermint.da.DataAvailabilityHeader") +} + +func init() { + proto.RegisterFile("tendermint/da/data_availability_header.proto", fileDescriptor_dfe248ce3a86eed9) +} + +var fileDescriptor_dfe248ce3a86eed9 = []byte{ + // 193 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x29, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x49, 0xd4, 0x4f, 0x49, 0x2c, 0x49, 0x8c, 0x4f, + 0x2c, 0x4b, 0xcc, 0xcc, 0x49, 0x4c, 0xca, 0xcc, 0xc9, 0x2c, 0xa9, 0x8c, 0xcf, 0x48, 0x4d, 0x4c, + 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x45, 0xa8, 0xd6, 0x4b, 0x49, 0x54, + 0x8a, 0xe0, 0x12, 0x73, 0x49, 0x2c, 0x49, 0x74, 0x44, 0x52, 0xef, 0x01, 0x56, 0x2e, 0x24, 0xcd, + 0xc5, 0x59, 0x94, 0x5f, 0x1e, 0x5f, 0x94, 0x9f, 0x5f, 0x52, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, + 0x13, 0xc4, 0x51, 0x94, 0x5f, 0x1e, 0x04, 0xe2, 0x0b, 0x29, 0x72, 0xf1, 0x24, 0xe7, 0xe7, 0x94, + 0xe6, 0xe6, 0x41, 0xe5, 0x99, 0xc0, 0xf2, 0xdc, 0x10, 0x31, 0xb0, 0x12, 0x27, 0xbf, 0x13, 0x8f, + 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, + 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x32, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, + 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0x72, 0x3b, 0x12, 0x13, 0xec, 0x54, 0x7d, 0x14, 0x7f, 0x25, 0xb1, + 0x81, 0x05, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x16, 0x1b, 0x0d, 0xef, 0x00, 0x00, + 0x00, +} + +func (m *DataAvailabilityHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataAvailabilityHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataAvailabilityHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ColumnRoots) > 0 { + for iNdEx := len(m.ColumnRoots) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ColumnRoots[iNdEx]) + copy(dAtA[i:], m.ColumnRoots[iNdEx]) + i = encodeVarintDataAvailabilityHeader(dAtA, i, uint64(len(m.ColumnRoots[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.RowRoots) > 0 { + for iNdEx := len(m.RowRoots) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RowRoots[iNdEx]) + copy(dAtA[i:], m.RowRoots[iNdEx]) + i = encodeVarintDataAvailabilityHeader(dAtA, i, uint64(len(m.RowRoots[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintDataAvailabilityHeader(dAtA []byte, offset int, v uint64) int { + offset -= sovDataAvailabilityHeader(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DataAvailabilityHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowRoots) > 0 { + for _, b := range m.RowRoots { + l = len(b) + n += 1 + l + sovDataAvailabilityHeader(uint64(l)) + } + } + if len(m.ColumnRoots) > 0 { + for _, b := range m.ColumnRoots { + l = len(b) + n += 1 + l + sovDataAvailabilityHeader(uint64(l)) + } + } + return n +} + +func sovDataAvailabilityHeader(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDataAvailabilityHeader(x uint64) (n int) { + return sovDataAvailabilityHeader(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DataAvailabilityHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataAvailabilityHeader + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DataAvailabilityHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DataAvailabilityHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowRoots", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataAvailabilityHeader + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDataAvailabilityHeader + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDataAvailabilityHeader + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RowRoots = append(m.RowRoots, make([]byte, postIndex-iNdEx)) + copy(m.RowRoots[len(m.RowRoots)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnRoots", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataAvailabilityHeader + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDataAvailabilityHeader + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDataAvailabilityHeader + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ColumnRoots = append(m.ColumnRoots, make([]byte, postIndex-iNdEx)) + copy(m.ColumnRoots[len(m.ColumnRoots)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDataAvailabilityHeader(dAtA[iNdEx:]) + if err != nil { 
+ return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDataAvailabilityHeader + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDataAvailabilityHeader(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataAvailabilityHeader + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataAvailabilityHeader + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataAvailabilityHeader + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDataAvailabilityHeader + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDataAvailabilityHeader + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDataAvailabilityHeader + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDataAvailabilityHeader = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDataAvailabilityHeader = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDataAvailabilityHeader = fmt.Errorf("proto: 
unexpected end of group") +) diff --git a/proto/tendermint/da/data_availability_header.proto b/proto/tendermint/da/data_availability_header.proto new file mode 100644 index 0000000000..3e82c6084b --- /dev/null +++ b/proto/tendermint/da/data_availability_header.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package tendermint.da; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/da"; + +// DataAvailabilityHeader contains the row and column roots of the erasure +// coded version of the data in Block.Data. +// Therefor the original Block.Data is arranged in a +// k × k matrix, which is then "extended" to a +// 2k × 2k matrix applying multiple times Reed-Solomon encoding. +// For details see Section 5.2: https://arxiv.org/abs/1809.09044 +// or the Celestia specification: +// https://github.com/celestiaorg/celestia-specs/blob/master/specs/data_structures.md#availabledataheader +// Note that currently we list row and column roots in separate fields +// (different from the spec). +message DataAvailabilityHeader { + // RowRoot_j = root((M_{j,1} || M_{j,2} || ... || M_{j,2k} )) + repeated bytes row_roots = 1; + // ColumnRoot_j = root((M_{1,j} || M_{2,j} || ... 
|| M_{2k,j} )) + repeated bytes column_roots = 2; +} \ No newline at end of file diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go index f2077aad8b..6dd30be50a 100644 --- a/proto/tendermint/types/block.pb.go +++ b/proto/tendermint/types/block.pb.go @@ -24,10 +24,9 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Block struct { - Header Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` - Data Data `protobuf:"bytes,2,opt,name=data,proto3" json:"data"` - Evidence EvidenceList `protobuf:"bytes,3,opt,name=evidence,proto3" json:"evidence"` - LastCommit *Commit `protobuf:"bytes,4,opt,name=last_commit,json=lastCommit,proto3" json:"last_commit,omitempty"` + Header Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` + Data Data `protobuf:"bytes,2,opt,name=data,proto3" json:"data"` + LastCommit *Commit `protobuf:"bytes,4,opt,name=last_commit,json=lastCommit,proto3" json:"last_commit,omitempty"` } func (m *Block) Reset() { *m = Block{} } @@ -77,13 +76,6 @@ func (m *Block) GetData() Data { return Data{} } -func (m *Block) GetEvidence() EvidenceList { - if m != nil { - return m.Evidence - } - return EvidenceList{} -} - func (m *Block) GetLastCommit() *Commit { if m != nil { return m.LastCommit @@ -98,24 +90,22 @@ func init() { func init() { proto.RegisterFile("tendermint/types/block.proto", fileDescriptor_70840e82f4357ab1) } var fileDescriptor_70840e82f4357ab1 = []byte{ - // 266 bytes of a gzipped FileDescriptorProto + // 228 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xca, 0xc9, 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x14, 0xa6, 0x29, 0x60, - 0x12, 0x2a, 0x2b, 
0x8f, 0x21, 0x9b, 0x5a, 0x96, 0x99, 0x92, 0x9a, 0x97, 0x9c, 0x0a, 0x51, 0xa0, - 0xf4, 0x8e, 0x91, 0x8b, 0xd5, 0x09, 0x64, 0xad, 0x90, 0x19, 0x17, 0x5b, 0x46, 0x6a, 0x62, 0x4a, - 0x6a, 0x91, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x84, 0x1e, 0xba, 0x0b, 0xf4, 0x3c, 0xc0, - 0xf2, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x41, 0x55, 0x0b, 0x19, 0x70, 0xb1, 0xa4, 0x24, - 0x96, 0x24, 0x4a, 0x30, 0x81, 0x75, 0x89, 0x61, 0xea, 0x72, 0x49, 0x2c, 0x49, 0x84, 0xea, 0x01, - 0xab, 0x14, 0x72, 0xe0, 0xe2, 0x80, 0xb9, 0x42, 0x82, 0x19, 0xac, 0x4b, 0x0e, 0x53, 0x97, 0x2b, - 0x54, 0x85, 0x4f, 0x66, 0x71, 0x09, 0x54, 0x37, 0x5c, 0x97, 0x90, 0x25, 0x17, 0x77, 0x4e, 0x62, - 0x71, 0x49, 0x7c, 0x72, 0x7e, 0x6e, 0x6e, 0x66, 0x89, 0x04, 0x0b, 0x2e, 0x07, 0x3b, 0x83, 0xe5, - 0x83, 0xb8, 0x40, 0x8a, 0x21, 0x6c, 0xa7, 0xc0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, - 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, - 0x63, 0x88, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0x0e, - 0x36, 0x04, 0x13, 0x12, 0xf8, 0xe8, 0x41, 0x9a, 0xc4, 0x06, 0x16, 0x37, 0x06, 0x04, 0x00, 0x00, - 0xff, 0xff, 0x79, 0x8c, 0xb5, 0x43, 0xd1, 0x01, 0x00, 0x00, + 0x12, 0x22, 0xab, 0xb4, 0x86, 0x91, 0x8b, 0xd5, 0x09, 0x64, 0xaa, 0x90, 0x19, 0x17, 0x5b, 0x46, + 0x6a, 0x62, 0x4a, 0x6a, 0x91, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x84, 0x1e, 0xba, 0x05, + 0x7a, 0x1e, 0x60, 0x79, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xa0, 0xaa, 0x85, 0x0c, 0xb8, + 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc0, 0xba, 0xc4, 0x30, 0x75, 0xb9, 0x24, 0x96, 0x24, + 0x42, 0xf5, 0x80, 0x55, 0x0a, 0x59, 0x72, 0x71, 0xe7, 0x24, 0x16, 0x97, 0xc4, 0x27, 0xe7, 0xe7, + 0xe6, 0x66, 0x96, 0x48, 0xb0, 0xe0, 0xb2, 0xce, 0x19, 0x2c, 0x1f, 0xc4, 0x05, 0x52, 0x0c, 0x61, + 0x3b, 0x05, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, + 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x79, 0x7a, 
0x66, + 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0xb2, 0x8f, 0x11, 0x4c, 0x48, 0xc8, 0xa0, + 0x87, 0x46, 0x12, 0x1b, 0x58, 0xdc, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xa3, 0xfc, 0x87, 0xdc, + 0x6e, 0x01, 0x00, 0x00, } func (m *Block) Marshal() (dAtA []byte, err error) { @@ -150,16 +140,6 @@ func (m *Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - { - size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBlock(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -204,8 +184,6 @@ func (m *Block) Size() (n int) { n += 1 + l + sovBlock(uint64(l)) l = m.Data.Size() n += 1 + l + sovBlock(uint64(l)) - l = m.Evidence.Size() - n += 1 + l + sovBlock(uint64(l)) if m.LastCommit != nil { l = m.LastCommit.Size() n += 1 + l + sovBlock(uint64(l)) @@ -314,39 +292,6 @@ func (m *Block) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBlock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBlock - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBlock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastCommit", wireType) diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto index 84e9bb15d8..d395501f5f 100644 --- a/proto/tendermint/types/block.proto +++ b/proto/tendermint/types/block.proto @@ 
-5,11 +5,9 @@ option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; -import "tendermint/types/evidence.proto"; message Block { - Header header = 1 [(gogoproto.nullable) = false]; - Data data = 2 [(gogoproto.nullable) = false]; - tendermint.types.EvidenceList evidence = 3 [(gogoproto.nullable) = false]; - Commit last_commit = 4; + Header header = 1 [(gogoproto.nullable) = false]; + Data data = 2 [(gogoproto.nullable) = false]; + Commit last_commit = 4; } diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go deleted file mode 100644 index daab3dc34f..0000000000 --- a/proto/tendermint/types/evidence.pb.go +++ /dev/null @@ -1,1394 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/types/evidence.proto - -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Evidence struct { - // Types that are valid to be assigned to Sum: - // *Evidence_DuplicateVoteEvidence - // *Evidence_LightClientAttackEvidence - Sum isEvidence_Sum `protobuf_oneof:"sum"` -} - -func (m *Evidence) Reset() { *m = Evidence{} } -func (m *Evidence) String() string { return proto.CompactTextString(m) } -func (*Evidence) ProtoMessage() {} -func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{0} -} -func (m *Evidence) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Evidence) XXX_Merge(src proto.Message) { - xxx_messageInfo_Evidence.Merge(m, src) -} -func (m *Evidence) XXX_Size() int { - return m.Size() -} -func (m *Evidence) XXX_DiscardUnknown() { - xxx_messageInfo_Evidence.DiscardUnknown(m) -} - -var xxx_messageInfo_Evidence proto.InternalMessageInfo - -type isEvidence_Sum interface { - isEvidence_Sum() - MarshalTo([]byte) (int, error) - Size() int -} - -type Evidence_DuplicateVoteEvidence struct { - DuplicateVoteEvidence *DuplicateVoteEvidence `protobuf:"bytes,1,opt,name=duplicate_vote_evidence,json=duplicateVoteEvidence,proto3,oneof" json:"duplicate_vote_evidence,omitempty"` -} -type Evidence_LightClientAttackEvidence struct { - LightClientAttackEvidence *LightClientAttackEvidence `protobuf:"bytes,2,opt,name=light_client_attack_evidence,json=lightClientAttackEvidence,proto3,oneof" json:"light_client_attack_evidence,omitempty"` -} - -func (*Evidence_DuplicateVoteEvidence) isEvidence_Sum() {} -func (*Evidence_LightClientAttackEvidence) isEvidence_Sum() {} - -func (m *Evidence) GetSum() isEvidence_Sum { - if m != 
nil { - return m.Sum - } - return nil -} - -func (m *Evidence) GetDuplicateVoteEvidence() *DuplicateVoteEvidence { - if x, ok := m.GetSum().(*Evidence_DuplicateVoteEvidence); ok { - return x.DuplicateVoteEvidence - } - return nil -} - -func (m *Evidence) GetLightClientAttackEvidence() *LightClientAttackEvidence { - if x, ok := m.GetSum().(*Evidence_LightClientAttackEvidence); ok { - return x.LightClientAttackEvidence - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Evidence) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Evidence_DuplicateVoteEvidence)(nil), - (*Evidence_LightClientAttackEvidence)(nil), - } -} - -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. -type DuplicateVoteEvidence struct { - VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` - VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` - TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` - ValidatorPower int64 `protobuf:"varint,4,opt,name=validator_power,json=validatorPower,proto3" json:"validator_power,omitempty"` - Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` -} - -func (m *DuplicateVoteEvidence) Reset() { *m = DuplicateVoteEvidence{} } -func (m *DuplicateVoteEvidence) String() string { return proto.CompactTextString(m) } -func (*DuplicateVoteEvidence) ProtoMessage() {} -func (*DuplicateVoteEvidence) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{1} -} -func (m *DuplicateVoteEvidence) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DuplicateVoteEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DuplicateVoteEvidence.Marshal(b, m, deterministic) - } else { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DuplicateVoteEvidence) XXX_Merge(src proto.Message) { - xxx_messageInfo_DuplicateVoteEvidence.Merge(m, src) -} -func (m *DuplicateVoteEvidence) XXX_Size() int { - return m.Size() -} -func (m *DuplicateVoteEvidence) XXX_DiscardUnknown() { - xxx_messageInfo_DuplicateVoteEvidence.DiscardUnknown(m) -} - -var xxx_messageInfo_DuplicateVoteEvidence proto.InternalMessageInfo - -func (m *DuplicateVoteEvidence) GetVoteA() *Vote { - if m != nil { - return m.VoteA - } - return nil -} - -func (m *DuplicateVoteEvidence) GetVoteB() *Vote { - if m != nil { - return m.VoteB - } - return nil -} - -func (m *DuplicateVoteEvidence) GetTotalVotingPower() int64 { - if m != nil { - return m.TotalVotingPower - } - return 0 -} - -func (m *DuplicateVoteEvidence) GetValidatorPower() int64 { - if m != nil { - return m.ValidatorPower - } - return 0 -} - -func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. 
-type LightClientAttackEvidence struct { - ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` - CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` - ByzantineValidators []*Validator `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators,omitempty"` - TotalVotingPower int64 `protobuf:"varint,4,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` - Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` -} - -func (m *LightClientAttackEvidence) Reset() { *m = LightClientAttackEvidence{} } -func (m *LightClientAttackEvidence) String() string { return proto.CompactTextString(m) } -func (*LightClientAttackEvidence) ProtoMessage() {} -func (*LightClientAttackEvidence) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{2} -} -func (m *LightClientAttackEvidence) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LightClientAttackEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LightClientAttackEvidence.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LightClientAttackEvidence) XXX_Merge(src proto.Message) { - xxx_messageInfo_LightClientAttackEvidence.Merge(m, src) -} -func (m *LightClientAttackEvidence) XXX_Size() int { - return m.Size() -} -func (m *LightClientAttackEvidence) XXX_DiscardUnknown() { - xxx_messageInfo_LightClientAttackEvidence.DiscardUnknown(m) -} - -var xxx_messageInfo_LightClientAttackEvidence proto.InternalMessageInfo - -func (m *LightClientAttackEvidence) GetConflictingBlock() *LightBlock { - if m != nil { - return m.ConflictingBlock - 
} - return nil -} - -func (m *LightClientAttackEvidence) GetCommonHeight() int64 { - if m != nil { - return m.CommonHeight - } - return 0 -} - -func (m *LightClientAttackEvidence) GetByzantineValidators() []*Validator { - if m != nil { - return m.ByzantineValidators - } - return nil -} - -func (m *LightClientAttackEvidence) GetTotalVotingPower() int64 { - if m != nil { - return m.TotalVotingPower - } - return 0 -} - -func (m *LightClientAttackEvidence) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -type EvidenceList struct { - Evidence []Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence"` -} - -func (m *EvidenceList) Reset() { *m = EvidenceList{} } -func (m *EvidenceList) String() string { return proto.CompactTextString(m) } -func (*EvidenceList) ProtoMessage() {} -func (*EvidenceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6825fabc78e0a168, []int{3} -} -func (m *EvidenceList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EvidenceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EvidenceList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EvidenceList) XXX_Merge(src proto.Message) { - xxx_messageInfo_EvidenceList.Merge(m, src) -} -func (m *EvidenceList) XXX_Size() int { - return m.Size() -} -func (m *EvidenceList) XXX_DiscardUnknown() { - xxx_messageInfo_EvidenceList.DiscardUnknown(m) -} - -var xxx_messageInfo_EvidenceList proto.InternalMessageInfo - -func (m *EvidenceList) GetEvidence() []Evidence { - if m != nil { - return m.Evidence - } - return nil -} - -func init() { - proto.RegisterType((*Evidence)(nil), "tendermint.types.Evidence") - proto.RegisterType((*DuplicateVoteEvidence)(nil), "tendermint.types.DuplicateVoteEvidence") - 
proto.RegisterType((*LightClientAttackEvidence)(nil), "tendermint.types.LightClientAttackEvidence") - proto.RegisterType((*EvidenceList)(nil), "tendermint.types.EvidenceList") -} - -func init() { proto.RegisterFile("tendermint/types/evidence.proto", fileDescriptor_6825fabc78e0a168) } - -var fileDescriptor_6825fabc78e0a168 = []byte{ - // 532 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xed, 0x3a, 0xa9, 0xc2, 0xb6, 0x40, 0x58, 0x5a, 0x48, 0x43, 0xe4, 0x44, 0xe1, 0xd0, - 0x48, 0x80, 0x2d, 0x95, 0x03, 0x17, 0x2e, 0x35, 0x20, 0x15, 0x29, 0x42, 0x60, 0xa1, 0x1e, 0xb8, - 0x58, 0x6b, 0x7b, 0xeb, 0xac, 0x6a, 0xef, 0x5a, 0xf1, 0x24, 0xa8, 0x3c, 0x45, 0x1e, 0xab, 0x17, - 0xa4, 0x1e, 0x39, 0x01, 0x4a, 0x78, 0x10, 0xe4, 0xf5, 0x9f, 0x44, 0x75, 0xcc, 0x89, 0x4b, 0xe4, - 0xcc, 0xfc, 0xbe, 0x9d, 0x99, 0xcf, 0xb3, 0x46, 0x7d, 0xa0, 0xdc, 0xa7, 0xd3, 0x88, 0x71, 0x30, - 0xe1, 0x2a, 0xa6, 0x89, 0x49, 0xe7, 0xcc, 0xa7, 0xdc, 0xa3, 0x46, 0x3c, 0x15, 0x20, 0x70, 0x7b, - 0x0d, 0x18, 0x12, 0xe8, 0x1e, 0x04, 0x22, 0x10, 0x32, 0x69, 0xa6, 0x4f, 0x19, 0xd7, 0xed, 0x07, - 0x42, 0x04, 0x21, 0x35, 0xe5, 0x3f, 0x77, 0x76, 0x61, 0x02, 0x8b, 0x68, 0x02, 0x24, 0x8a, 0x73, - 0xa0, 0x57, 0xa9, 0x24, 0x7f, 0xf3, 0xec, 0xa0, 0x92, 0x9d, 0x93, 0x90, 0xf9, 0x04, 0xc4, 0x34, - 0x23, 0x86, 0x7f, 0x54, 0xd4, 0x7a, 0x97, 0xf7, 0x86, 0x09, 0x7a, 0xec, 0xcf, 0xe2, 0x90, 0x79, - 0x04, 0xa8, 0x33, 0x17, 0x40, 0x9d, 0xa2, 0xed, 0x8e, 0x3a, 0x50, 0x47, 0x7b, 0x27, 0xc7, 0xc6, - 0xed, 0xbe, 0x8d, 0xb7, 0x85, 0xe0, 0x5c, 0x00, 0x2d, 0x4e, 0x3a, 0x53, 0xec, 0x43, 0x7f, 0x5b, - 0x02, 0x73, 0xd4, 0x0b, 0x59, 0x30, 0x01, 0xc7, 0x0b, 0x19, 0xe5, 0xe0, 0x10, 0x00, 0xe2, 0x5d, - 0xae, 0xeb, 0xec, 0xc8, 0x3a, 0xcf, 0xaa, 0x75, 0xc6, 0xa9, 0xea, 0x8d, 0x14, 0x9d, 0x4a, 0xcd, - 0x46, 0xad, 0xa3, 0xb0, 0x2e, 0x69, 0x35, 0x91, 0x96, 0xcc, 0xa2, 0xe1, 0x62, 0x07, 0x1d, 0x6e, - 0xed, 0x14, 0xbf, 0x40, 0xbb, 0x72, 0x52, 0x92, 
0x8f, 0xf8, 0xa8, 0x5a, 0x3a, 0xe5, 0xed, 0x66, - 0x4a, 0x9d, 0x96, 0xb8, 0x9b, 0x77, 0xfa, 0x4f, 0xdc, 0xc2, 0xcf, 0x11, 0x06, 0x01, 0x24, 0x4c, - 0xdd, 0x64, 0x3c, 0x70, 0x62, 0xf1, 0x95, 0x4e, 0x3b, 0xda, 0x40, 0x1d, 0x69, 0x76, 0x5b, 0x66, - 0xce, 0x65, 0xe2, 0x63, 0x1a, 0xc7, 0xc7, 0xe8, 0x7e, 0xf9, 0x7e, 0x72, 0xb4, 0x21, 0xd1, 0x7b, - 0x65, 0x38, 0x03, 0x2d, 0x74, 0xa7, 0x5c, 0x84, 0x4e, 0x53, 0x36, 0xd2, 0x35, 0xb2, 0x55, 0x31, - 0x8a, 0x55, 0x31, 0x3e, 0x17, 0x84, 0xd5, 0xba, 0xfe, 0xd9, 0x57, 0x16, 0xbf, 0xfa, 0xaa, 0xbd, - 0x96, 0x0d, 0xbf, 0xef, 0xa0, 0xa3, 0x5a, 0x53, 0xf1, 0x7b, 0xf4, 0xc0, 0x13, 0xfc, 0x22, 0x64, - 0x9e, 0xec, 0xdb, 0x0d, 0x85, 0x77, 0x99, 0x3b, 0xd4, 0xab, 0x79, 0x39, 0x56, 0xca, 0xd8, 0xed, - 0x0d, 0x99, 0x8c, 0xe0, 0xa7, 0xe8, 0xae, 0x27, 0xa2, 0x48, 0x70, 0x67, 0x42, 0x53, 0x4e, 0x3a, - 0xa7, 0xd9, 0xfb, 0x59, 0xf0, 0x4c, 0xc6, 0xf0, 0x07, 0x74, 0xe0, 0x5e, 0x7d, 0x23, 0x1c, 0x18, - 0xa7, 0x4e, 0x39, 0x6d, 0xd2, 0xd1, 0x06, 0xda, 0x68, 0xef, 0xe4, 0xc9, 0x16, 0x97, 0x0b, 0xc6, - 0x7e, 0x58, 0x0a, 0xcb, 0x58, 0x52, 0x63, 0x7c, 0xa3, 0xc6, 0xf8, 0xff, 0xe1, 0xe7, 0x18, 0xed, - 0x17, 0xee, 0x8d, 0x59, 0x02, 0xf8, 0x35, 0x6a, 0x6d, 0xdc, 0x1e, 0x4d, 0x1e, 0x59, 0x99, 0xa2, - 0xdc, 0xd3, 0x46, 0x7a, 0xa4, 0x5d, 0x2a, 0xac, 0x4f, 0xd7, 0x4b, 0x5d, 0xbd, 0x59, 0xea, 0xea, - 0xef, 0xa5, 0xae, 0x2e, 0x56, 0xba, 0x72, 0xb3, 0xd2, 0x95, 0x1f, 0x2b, 0x5d, 0xf9, 0xf2, 0x2a, - 0x60, 0x30, 0x99, 0xb9, 0x86, 0x27, 0x22, 0x73, 0xf3, 0x7a, 0xaf, 0x1f, 0xb3, 0xaf, 0xc8, 0xed, - 0xab, 0xef, 0xee, 0xca, 0xf8, 0xcb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x21, 0x16, 0x68, - 0x9d, 0x04, 0x00, 0x00, -} - -func (m *Evidence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func 
(m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Sum != nil { - { - size := m.Sum.Size() - i -= size - if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *Evidence_DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Evidence_DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DuplicateVoteEvidence != nil { - { - size, err := m.DuplicateVoteEvidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *Evidence_LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.LightClientAttackEvidence != nil { - { - size, err := m.LightClientAttackEvidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintEvidence(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x2a - if m.ValidatorPower != 0 { - i = encodeVarintEvidence(dAtA, i, uint64(m.ValidatorPower)) - i-- - dAtA[i] = 0x20 - } - if m.TotalVotingPower != 0 { - i = encodeVarintEvidence(dAtA, i, uint64(m.TotalVotingPower)) - i-- - dAtA[i] = 0x18 - } - if m.VoteB != nil { - { - size, err := m.VoteB.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.VoteA != nil { - { - size, err := m.VoteA.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintEvidence(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x2a - if m.TotalVotingPower != 0 { - i = encodeVarintEvidence(dAtA, i, uint64(m.TotalVotingPower)) - i-- - dAtA[i] = 0x20 - } - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.CommonHeight != 0 { - i = encodeVarintEvidence(dAtA, i, uint64(m.CommonHeight)) - i-- - dAtA[i] = 0x10 - } - if m.ConflictingBlock != nil { - { - size, err := m.ConflictingBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EvidenceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EvidenceList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EvidenceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Evidence) > 0 { - for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Evidence[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvidence(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintEvidence(dAtA []byte, offset int, v uint64) int { - offset -= sovEvidence(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Evidence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() - } - return n -} - -func (m *Evidence_DuplicateVoteEvidence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DuplicateVoteEvidence != nil { - l = m.DuplicateVoteEvidence.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - return n -} -func (m *Evidence_LightClientAttackEvidence) Size() (n int) { - if m == nil { - return 0 - } - 
var l int - _ = l - if m.LightClientAttackEvidence != nil { - l = m.LightClientAttackEvidence.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - return n -} -func (m *DuplicateVoteEvidence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.VoteA != nil { - l = m.VoteA.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - if m.VoteB != nil { - l = m.VoteB.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - if m.TotalVotingPower != 0 { - n += 1 + sovEvidence(uint64(m.TotalVotingPower)) - } - if m.ValidatorPower != 0 { - n += 1 + sovEvidence(uint64(m.ValidatorPower)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovEvidence(uint64(l)) - return n -} - -func (m *LightClientAttackEvidence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConflictingBlock != nil { - l = m.ConflictingBlock.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - if m.CommonHeight != 0 { - n += 1 + sovEvidence(uint64(m.CommonHeight)) - } - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - } - if m.TotalVotingPower != 0 { - n += 1 + sovEvidence(uint64(m.TotalVotingPower)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovEvidence(uint64(l)) - return n -} - -func (m *EvidenceList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Evidence) > 0 { - for _, e := range m.Evidence { - l = e.Size() - n += 1 + l + sovEvidence(uint64(l)) - } - } - return n -} - -func sovEvidence(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEvidence(x uint64) (n int) { - return sovEvidence(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Evidence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Evidence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DuplicateVoteEvidence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DuplicateVoteEvidence{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &Evidence_DuplicateVoteEvidence{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LightClientAttackEvidence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &LightClientAttackEvidence{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &Evidence_LightClientAttackEvidence{v} - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipEvidence(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DuplicateVoteEvidence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DuplicateVoteEvidence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteA", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VoteA == nil { - m.VoteA = &Vote{} - } - if err := m.VoteA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteB", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VoteB == nil { - m.VoteB = &Vote{} - } - if err := m.VoteB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) - } - m.TotalVotingPower = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalVotingPower |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorPower", wireType) - } - m.ValidatorPower = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ValidatorPower |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipEvidence(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LightClientAttackEvidence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LightClientAttackEvidence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConflictingBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConflictingBlock == nil { - m.ConflictingBlock = &LightBlock{} - } - if err := m.ConflictingBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CommonHeight", wireType) - } - m.CommonHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CommonHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ByzantineValidators = append(m.ByzantineValidators, &Validator{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) - } - m.TotalVotingPower = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalVotingPower |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvidence(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EvidenceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EvidenceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EvidenceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvidence - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvidence - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvidence - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Evidence = append(m.Evidence, Evidence{}) - if err := m.Evidence[len(m.Evidence)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvidence(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvidence - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEvidence(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvidence - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvidence - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvidence - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEvidence - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEvidence - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEvidence - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthEvidence = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEvidence = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEvidence = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto deleted file mode 100644 index 
451b8dca3c..0000000000 --- a/proto/tendermint/types/evidence.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; -package tendermint.types; - -option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "tendermint/types/types.proto"; -import "tendermint/types/validator.proto"; - -message Evidence { - oneof sum { - DuplicateVoteEvidence duplicate_vote_evidence = 1; - LightClientAttackEvidence light_client_attack_evidence = 2; - } -} - -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. -message DuplicateVoteEvidence { - tendermint.types.Vote vote_a = 1; - tendermint.types.Vote vote_b = 2; - int64 total_voting_power = 3; - int64 validator_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. -message LightClientAttackEvidence { - tendermint.types.LightBlock conflicting_block = 1; - int64 common_height = 2; - repeated tendermint.types.Validator byzantine_validators = 3; - int64 total_voting_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; -} - -message EvidenceList { - repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 73090558ea..1f0048075d 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -421,6 +421,11 @@ type Data struct { // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. 
Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + // field number 2 is reserved for intermediate state roots + Evidence EvidenceList `protobuf:"bytes,3,opt,name=evidence,proto3" json:"evidence"` + Messages Messages `protobuf:"bytes,4,opt,name=messages,proto3" json:"messages"` + OriginalSquareSize uint64 `protobuf:"varint,5,opt,name=original_square_size,json=originalSquareSize,proto3" json:"original_square_size,omitempty"` + Hash []byte `protobuf:"bytes,6,opt,name=hash,proto3" json:"hash,omitempty"` } func (m *Data) Reset() { *m = Data{} } @@ -463,6 +468,414 @@ func (m *Data) GetTxs() [][]byte { return nil } +func (m *Data) GetEvidence() EvidenceList { + if m != nil { + return m.Evidence + } + return EvidenceList{} +} + +func (m *Data) GetMessages() Messages { + if m != nil { + return m.Messages + } + return Messages{} +} + +func (m *Data) GetOriginalSquareSize() uint64 { + if m != nil { + return m.OriginalSquareSize + } + return 0 +} + +func (m *Data) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. 
+type DuplicateVoteEvidence struct { + VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` + VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` + TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + ValidatorPower int64 `protobuf:"varint,4,opt,name=validator_power,json=validatorPower,proto3" json:"validator_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *DuplicateVoteEvidence) Reset() { *m = DuplicateVoteEvidence{} } +func (m *DuplicateVoteEvidence) String() string { return proto.CompactTextString(m) } +func (*DuplicateVoteEvidence) ProtoMessage() {} +func (*DuplicateVoteEvidence) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{5} +} +func (m *DuplicateVoteEvidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DuplicateVoteEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DuplicateVoteEvidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DuplicateVoteEvidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_DuplicateVoteEvidence.Merge(m, src) +} +func (m *DuplicateVoteEvidence) XXX_Size() int { + return m.Size() +} +func (m *DuplicateVoteEvidence) XXX_DiscardUnknown() { + xxx_messageInfo_DuplicateVoteEvidence.DiscardUnknown(m) +} + +var xxx_messageInfo_DuplicateVoteEvidence proto.InternalMessageInfo + +func (m *DuplicateVoteEvidence) GetVoteA() *Vote { + if m != nil { + return m.VoteA + } + return nil +} + +func (m *DuplicateVoteEvidence) GetVoteB() *Vote { + if m != nil { + return m.VoteB + } + return nil +} + +func (m *DuplicateVoteEvidence) GetTotalVotingPower() int64 { + if m 
!= nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetValidatorPower() int64 { + if m != nil { + return m.ValidatorPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. +type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` + CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` + ByzantineValidators []*Validator `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators,omitempty"` + TotalVotingPower int64 `protobuf:"varint,4,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *LightClientAttackEvidence) Reset() { *m = LightClientAttackEvidence{} } +func (m *LightClientAttackEvidence) String() string { return proto.CompactTextString(m) } +func (*LightClientAttackEvidence) ProtoMessage() {} +func (*LightClientAttackEvidence) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{6} +} +func (m *LightClientAttackEvidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightClientAttackEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightClientAttackEvidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightClientAttackEvidence) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_LightClientAttackEvidence.Merge(m, src) +} +func (m *LightClientAttackEvidence) XXX_Size() int { + return m.Size() +} +func (m *LightClientAttackEvidence) XXX_DiscardUnknown() { + xxx_messageInfo_LightClientAttackEvidence.DiscardUnknown(m) +} + +var xxx_messageInfo_LightClientAttackEvidence proto.InternalMessageInfo + +func (m *LightClientAttackEvidence) GetConflictingBlock() *LightBlock { + if m != nil { + return m.ConflictingBlock + } + return nil +} + +func (m *LightClientAttackEvidence) GetCommonHeight() int64 { + if m != nil { + return m.CommonHeight + } + return 0 +} + +func (m *LightClientAttackEvidence) GetByzantineValidators() []*Validator { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +func (m *LightClientAttackEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *LightClientAttackEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +type Evidence struct { + // Types that are valid to be assigned to Sum: + // *Evidence_DuplicateVoteEvidence + // *Evidence_LightClientAttackEvidence + Sum isEvidence_Sum `protobuf_oneof:"sum"` +} + +func (m *Evidence) Reset() { *m = Evidence{} } +func (m *Evidence) String() string { return proto.CompactTextString(m) } +func (*Evidence) ProtoMessage() {} +func (*Evidence) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{7} +} +func (m *Evidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Evidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Evidence.Merge(m, src) +} +func (m *Evidence) XXX_Size() int { + return m.Size() +} 
+func (m *Evidence) XXX_DiscardUnknown() { + xxx_messageInfo_Evidence.DiscardUnknown(m) +} + +var xxx_messageInfo_Evidence proto.InternalMessageInfo + +type isEvidence_Sum interface { + isEvidence_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Evidence_DuplicateVoteEvidence struct { + DuplicateVoteEvidence *DuplicateVoteEvidence `protobuf:"bytes,1,opt,name=duplicate_vote_evidence,json=duplicateVoteEvidence,proto3,oneof" json:"duplicate_vote_evidence,omitempty"` +} +type Evidence_LightClientAttackEvidence struct { + LightClientAttackEvidence *LightClientAttackEvidence `protobuf:"bytes,2,opt,name=light_client_attack_evidence,json=lightClientAttackEvidence,proto3,oneof" json:"light_client_attack_evidence,omitempty"` +} + +func (*Evidence_DuplicateVoteEvidence) isEvidence_Sum() {} +func (*Evidence_LightClientAttackEvidence) isEvidence_Sum() {} + +func (m *Evidence) GetSum() isEvidence_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Evidence) GetDuplicateVoteEvidence() *DuplicateVoteEvidence { + if x, ok := m.GetSum().(*Evidence_DuplicateVoteEvidence); ok { + return x.DuplicateVoteEvidence + } + return nil +} + +func (m *Evidence) GetLightClientAttackEvidence() *LightClientAttackEvidence { + if x, ok := m.GetSum().(*Evidence_LightClientAttackEvidence); ok { + return x.LightClientAttackEvidence + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Evidence) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Evidence_DuplicateVoteEvidence)(nil), + (*Evidence_LightClientAttackEvidence)(nil), + } +} + +// EvidenceData contains any evidence of malicious wrong-doing by validators +type EvidenceList struct { + Evidence []Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence"` +} + +func (m *EvidenceList) Reset() { *m = EvidenceList{} } +func (m *EvidenceList) String() string { return proto.CompactTextString(m) } +func (*EvidenceList) ProtoMessage() {} +func (*EvidenceList) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{8} +} +func (m *EvidenceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvidenceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvidenceList.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvidenceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceList.Merge(m, src) +} +func (m *EvidenceList) XXX_Size() int { + return m.Size() +} +func (m *EvidenceList) XXX_DiscardUnknown() { + xxx_messageInfo_EvidenceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EvidenceList proto.InternalMessageInfo + +func (m *EvidenceList) GetEvidence() []Evidence { + if m != nil { + return m.Evidence + } + return nil +} + +type Messages struct { + MessagesList []*Message `protobuf:"bytes,1,rep,name=messages_list,json=messagesList,proto3" json:"messages_list,omitempty"` +} + +func (m *Messages) Reset() { *m = Messages{} } +func (m *Messages) String() string { return proto.CompactTextString(m) } +func (*Messages) ProtoMessage() {} +func (*Messages) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{9} +} +func (m *Messages) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Messages) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Messages.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Messages) XXX_Merge(src proto.Message) { + xxx_messageInfo_Messages.Merge(m, src) +} +func (m *Messages) XXX_Size() int { + return m.Size() +} +func (m *Messages) XXX_DiscardUnknown() { + xxx_messageInfo_Messages.DiscardUnknown(m) +} + +var xxx_messageInfo_Messages proto.InternalMessageInfo + +func (m *Messages) GetMessagesList() []*Message { + if m != nil { + return m.MessagesList + } + return nil +} + +type Message struct { + NamespaceId []byte `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{10} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetNamespaceId() []byte { + if m != nil { + return m.NamespaceId + } + return nil +} + +func (m *Message) GetData() []byte { + if m != 
nil { + return m.Data + } + return nil +} + // Vote represents a prevote, precommit, or commit vote from validators for // consensus. type Vote struct { @@ -480,7 +893,7 @@ func (m *Vote) Reset() { *m = Vote{} } func (m *Vote) String() string { return proto.CompactTextString(m) } func (*Vote) ProtoMessage() {} func (*Vote) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{5} + return fileDescriptor_d3a6e55e2345de56, []int{11} } func (m *Vote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -577,7 +990,7 @@ func (m *Commit) Reset() { *m = Commit{} } func (m *Commit) String() string { return proto.CompactTextString(m) } func (*Commit) ProtoMessage() {} func (*Commit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{6} + return fileDescriptor_d3a6e55e2345de56, []int{12} } func (m *Commit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -646,7 +1059,7 @@ func (m *CommitSig) Reset() { *m = CommitSig{} } func (m *CommitSig) String() string { return proto.CompactTextString(m) } func (*CommitSig) ProtoMessage() {} func (*CommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{7} + return fileDescriptor_d3a6e55e2345de56, []int{13} } func (m *CommitSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -717,7 +1130,7 @@ func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{8} + return fileDescriptor_d3a6e55e2345de56, []int{14} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -804,7 +1217,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } func (m *SignedHeader) String() string { return proto.CompactTextString(m) } func (*SignedHeader) ProtoMessage() {} func (*SignedHeader) Descriptor() ([]byte, []int) { - return 
fileDescriptor_d3a6e55e2345de56, []int{9} + return fileDescriptor_d3a6e55e2345de56, []int{15} } func (m *SignedHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -856,7 +1269,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } func (m *LightBlock) String() string { return proto.CompactTextString(m) } func (*LightBlock) ProtoMessage() {} func (*LightBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{10} + return fileDescriptor_d3a6e55e2345de56, []int{16} } func (m *LightBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -910,7 +1323,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} } func (m *BlockMeta) String() string { return proto.CompactTextString(m) } func (*BlockMeta) ProtoMessage() {} func (*BlockMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{11} + return fileDescriptor_d3a6e55e2345de56, []int{17} } func (m *BlockMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -969,16 +1382,16 @@ func (m *BlockMeta) GetNumTxs() int64 { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. 
type TxProof struct { - RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Proof *crypto.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` + RowRoots [][]byte `protobuf:"bytes,1,rep,name=row_roots,json=rowRoots,proto3" json:"row_roots,omitempty"` + Data [][]byte `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty"` + Proofs []*NMTProof `protobuf:"bytes,3,rep,name=proofs,proto3" json:"proofs,omitempty"` } func (m *TxProof) Reset() { *m = TxProof{} } func (m *TxProof) String() string { return proto.CompactTextString(m) } func (*TxProof) ProtoMessage() {} func (*TxProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{12} + return fileDescriptor_d3a6e55e2345de56, []int{18} } func (m *TxProof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1007,23 +1420,161 @@ func (m *TxProof) XXX_DiscardUnknown() { var xxx_messageInfo_TxProof proto.InternalMessageInfo -func (m *TxProof) GetRootHash() []byte { +func (m *TxProof) GetRowRoots() [][]byte { if m != nil { - return m.RootHash + return m.RowRoots } return nil } -func (m *TxProof) GetData() []byte { +func (m *TxProof) GetData() [][]byte { if m != nil { return m.Data } return nil } -func (m *TxProof) GetProof() *crypto.Proof { +func (m *TxProof) GetProofs() []*NMTProof { if m != nil { - return m.Proof + return m.Proofs + } + return nil +} + +// MalleatedTx wraps a transaction that was derived from a different original +// transaction. 
This allows for tendermint to track malleated and original +// transactions +type MalleatedTx struct { + OriginalTxHash []byte `protobuf:"bytes,1,opt,name=original_tx_hash,json=originalTxHash,proto3" json:"original_tx_hash,omitempty"` + Tx []byte `protobuf:"bytes,2,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (m *MalleatedTx) Reset() { *m = MalleatedTx{} } +func (m *MalleatedTx) String() string { return proto.CompactTextString(m) } +func (*MalleatedTx) ProtoMessage() {} +func (*MalleatedTx) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{19} +} +func (m *MalleatedTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MalleatedTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MalleatedTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MalleatedTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_MalleatedTx.Merge(m, src) +} +func (m *MalleatedTx) XXX_Size() int { + return m.Size() +} +func (m *MalleatedTx) XXX_DiscardUnknown() { + xxx_messageInfo_MalleatedTx.DiscardUnknown(m) +} + +var xxx_messageInfo_MalleatedTx proto.InternalMessageInfo + +func (m *MalleatedTx) GetOriginalTxHash() []byte { + if m != nil { + return m.OriginalTxHash + } + return nil +} + +func (m *MalleatedTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +// Proof represents proof of a namespace.ID in an NMT. +// In case this proof proves the absence of a namespace.ID +// in a tree it also contains the leaf hashes of the range +// where that namespace would be. +type NMTProof struct { + // start index of this proof. + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // end index of this proof. 
+ End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` + // Nodes that together with the corresponding leaf values can be used to + // recompute the root and verify this proof. Nodes should consist of the max + // and min namespaces along with the actual hash, resulting in each being 48 + // bytes each + Nodes [][]byte `protobuf:"bytes,3,rep,name=nodes,proto3" json:"nodes,omitempty"` + // leafHash are nil if the namespace is present in the NMT. In case the + // namespace to be proved is in the min/max range of the tree but absent, this + // will contain the leaf hash necessary to verify the proof of absence. Leaf + // hashes should consist of the namespace along with the actual hash, + // resulting 40 bytes total. + LeafHash []byte `protobuf:"bytes,4,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"` +} + +func (m *NMTProof) Reset() { *m = NMTProof{} } +func (m *NMTProof) String() string { return proto.CompactTextString(m) } +func (*NMTProof) ProtoMessage() {} +func (*NMTProof) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{20} +} +func (m *NMTProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NMTProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NMTProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NMTProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_NMTProof.Merge(m, src) +} +func (m *NMTProof) XXX_Size() int { + return m.Size() +} +func (m *NMTProof) XXX_DiscardUnknown() { + xxx_messageInfo_NMTProof.DiscardUnknown(m) +} + +var xxx_messageInfo_NMTProof proto.InternalMessageInfo + +func (m *NMTProof) GetStart() int32 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *NMTProof) GetEnd() int32 { + if m != nil { + return m.End + } + return 0 +} + +func (m *NMTProof) 
GetNodes() [][]byte { + if m != nil { + return m.Nodes + } + return nil +} + +func (m *NMTProof) GetLeafHash() []byte { + if m != nil { + return m.LeafHash } return nil } @@ -1036,6 +1587,12 @@ func init() { proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") proto.RegisterType((*Header)(nil), "tendermint.types.Header") proto.RegisterType((*Data)(nil), "tendermint.types.Data") + proto.RegisterType((*DuplicateVoteEvidence)(nil), "tendermint.types.DuplicateVoteEvidence") + proto.RegisterType((*LightClientAttackEvidence)(nil), "tendermint.types.LightClientAttackEvidence") + proto.RegisterType((*Evidence)(nil), "tendermint.types.Evidence") + proto.RegisterType((*EvidenceList)(nil), "tendermint.types.EvidenceList") + proto.RegisterType((*Messages)(nil), "tendermint.types.Messages") + proto.RegisterType((*Message)(nil), "tendermint.types.Message") proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") @@ -1044,95 +1601,127 @@ func init() { proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") + proto.RegisterType((*MalleatedTx)(nil), "tendermint.types.MalleatedTx") + proto.RegisterType((*NMTProof)(nil), "tendermint.types.NMTProof") } func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1314 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xda, 0x9b, 0xd8, 0x7e, 0xb6, 0x13, 0x67, 0x95, 0xb6, 0xae, 0xdb, 0x38, 0x2b, 0x23, - 0x20, 0x2d, 0x68, 0x53, 0x52, 0xc4, 0x9f, 0x03, 0x07, 0xdb, 0x49, 0x5b, 0xab, 0x89, 0x63, 0xd6, - 0x6e, 0x11, 0x5c, 0x56, 0x6b, 0xef, 0xd4, 0x5e, 0xba, 0xde, 0x59, 
0xed, 0x8c, 0x43, 0xd2, 0x4f, - 0x80, 0x72, 0xea, 0x89, 0x5b, 0x4e, 0x70, 0xe0, 0xce, 0x17, 0x40, 0x9c, 0x7a, 0xec, 0x0d, 0x2e, - 0x14, 0x94, 0x4a, 0x88, 0x8f, 0x81, 0xe6, 0x8f, 0xd7, 0xeb, 0x38, 0x86, 0xaa, 0xaa, 0xb8, 0x58, - 0x3b, 0xef, 0xfd, 0xde, 0xcc, 0x7b, 0xbf, 0xf7, 0x9b, 0x3f, 0x86, 0xeb, 0x14, 0xf9, 0x0e, 0x0a, - 0x87, 0xae, 0x4f, 0xb7, 0xe8, 0x71, 0x80, 0x88, 0xf8, 0x35, 0x82, 0x10, 0x53, 0xac, 0x15, 0x26, - 0x5e, 0x83, 0xdb, 0x4b, 0x6b, 0x7d, 0xdc, 0xc7, 0xdc, 0xb9, 0xc5, 0xbe, 0x04, 0xae, 0xb4, 0xd1, - 0xc7, 0xb8, 0xef, 0xa1, 0x2d, 0x3e, 0xea, 0x8e, 0x1e, 0x6d, 0x51, 0x77, 0x88, 0x08, 0xb5, 0x87, - 0x81, 0x04, 0xac, 0xc7, 0x96, 0xe9, 0x85, 0xc7, 0x01, 0xc5, 0x0c, 0x8b, 0x1f, 0x49, 0x77, 0x39, - 0xe6, 0x3e, 0x44, 0x21, 0x71, 0xb1, 0x1f, 0xcf, 0xa3, 0xa4, 0xcf, 0x64, 0x79, 0x68, 0x7b, 0xae, - 0x63, 0x53, 0x1c, 0x0a, 0x44, 0xe5, 0x53, 0xc8, 0xb7, 0xec, 0x90, 0xb6, 0x11, 0xbd, 0x87, 0x6c, - 0x07, 0x85, 0xda, 0x1a, 0x2c, 0x52, 0x4c, 0x6d, 0xaf, 0xa8, 0xe8, 0xca, 0x66, 0xde, 0x14, 0x03, - 0x4d, 0x03, 0x75, 0x60, 0x93, 0x41, 0x31, 0xa1, 0x2b, 0x9b, 0x39, 0x93, 0x7f, 0x57, 0x06, 0xa0, - 0xb2, 0x50, 0x16, 0xe1, 0xfa, 0x0e, 0x3a, 0x1a, 0x47, 0xf0, 0x01, 0xb3, 0x76, 0x8f, 0x29, 0x22, - 0x32, 0x44, 0x0c, 0xb4, 0x0f, 0x61, 0x91, 0xe7, 0x5f, 0x4c, 0xea, 0xca, 0x66, 0x76, 0xbb, 0x68, - 0xc4, 0x88, 0x12, 0xf5, 0x19, 0x2d, 0xe6, 0xaf, 0xa9, 0xcf, 0x5e, 0x6c, 0x2c, 0x98, 0x02, 0x5c, - 0xf1, 0x20, 0x55, 0xf3, 0x70, 0xef, 0x71, 0x63, 0x27, 0x4a, 0x44, 0x99, 0x24, 0xa2, 0xed, 0xc3, - 0x4a, 0x60, 0x87, 0xd4, 0x22, 0x88, 0x5a, 0x03, 0x5e, 0x05, 0x5f, 0x34, 0xbb, 0xbd, 0x61, 0x9c, - 0xef, 0x83, 0x31, 0x55, 0xac, 0x5c, 0x25, 0x1f, 0xc4, 0x8d, 0x95, 0xbf, 0x54, 0x58, 0x92, 0x64, - 0x7c, 0x06, 0x29, 0x49, 0x2b, 0x5f, 0x30, 0xbb, 0xbd, 0x1e, 0x9f, 0x51, 0xba, 0x8c, 0x3a, 0xf6, - 0x09, 0xf2, 0xc9, 0x88, 0xc8, 0xf9, 0xc6, 0x31, 0xda, 0x3b, 0x90, 0xee, 0x0d, 0x6c, 0xd7, 0xb7, - 0x5c, 0x87, 0x67, 0x94, 0xa9, 0x65, 0xcf, 0x5e, 0x6c, 0xa4, 0xea, 0xcc, 0xd6, 0xd8, 0x31, 0x53, - 0xdc, 
0xd9, 0x70, 0xb4, 0xcb, 0xb0, 0x34, 0x40, 0x6e, 0x7f, 0x40, 0x39, 0x2d, 0x49, 0x53, 0x8e, - 0xb4, 0x4f, 0x40, 0x65, 0x82, 0x28, 0xaa, 0x7c, 0xed, 0x92, 0x21, 0xd4, 0x62, 0x8c, 0xd5, 0x62, - 0x74, 0xc6, 0x6a, 0xa9, 0xa5, 0xd9, 0xc2, 0x4f, 0xff, 0xd8, 0x50, 0x4c, 0x1e, 0xa1, 0xd5, 0x21, - 0xef, 0xd9, 0x84, 0x5a, 0x5d, 0x46, 0x1b, 0x5b, 0x7e, 0x91, 0x4f, 0x71, 0x75, 0x96, 0x10, 0x49, - 0xac, 0x4c, 0x3d, 0xcb, 0xa2, 0x84, 0xc9, 0xd1, 0x36, 0xa1, 0xc0, 0x27, 0xe9, 0xe1, 0xe1, 0xd0, - 0xa5, 0x16, 0xe7, 0x7d, 0x89, 0xf3, 0xbe, 0xcc, 0xec, 0x75, 0x6e, 0xbe, 0xc7, 0x3a, 0x70, 0x0d, - 0x32, 0x8e, 0x4d, 0x6d, 0x01, 0x49, 0x71, 0x48, 0x9a, 0x19, 0xb8, 0xf3, 0x5d, 0x58, 0x89, 0x54, - 0x47, 0x04, 0x24, 0x2d, 0x66, 0x99, 0x98, 0x39, 0xf0, 0x16, 0xac, 0xf9, 0xe8, 0x88, 0x5a, 0xe7, - 0xd1, 0x19, 0x8e, 0xd6, 0x98, 0xef, 0xe1, 0x74, 0xc4, 0xdb, 0xb0, 0xdc, 0x1b, 0x93, 0x2f, 0xb0, - 0xc0, 0xb1, 0xf9, 0xc8, 0xca, 0x61, 0x57, 0x21, 0x6d, 0x07, 0x81, 0x00, 0x64, 0x39, 0x20, 0x65, - 0x07, 0x01, 0x77, 0xdd, 0x84, 0x55, 0x5e, 0x63, 0x88, 0xc8, 0xc8, 0xa3, 0x72, 0x92, 0x1c, 0xc7, - 0xac, 0x30, 0x87, 0x29, 0xec, 0x1c, 0xfb, 0x16, 0xe4, 0xd1, 0xa1, 0xeb, 0x20, 0xbf, 0x87, 0x04, - 0x2e, 0xcf, 0x71, 0xb9, 0xb1, 0x91, 0x83, 0x6e, 0x40, 0x21, 0x08, 0x71, 0x80, 0x09, 0x0a, 0x2d, - 0xdb, 0x71, 0x42, 0x44, 0x48, 0x71, 0x59, 0xcc, 0x37, 0xb6, 0x57, 0x85, 0xb9, 0x52, 0x04, 0x75, - 0xc7, 0xa6, 0xb6, 0x56, 0x80, 0x24, 0x3d, 0x22, 0x45, 0x45, 0x4f, 0x6e, 0xe6, 0x4c, 0xf6, 0x59, - 0xf9, 0x3b, 0x01, 0xea, 0x43, 0x4c, 0x91, 0x76, 0x1b, 0x54, 0xd6, 0x26, 0xae, 0xbe, 0xe5, 0x8b, - 0xf4, 0xdc, 0x76, 0xfb, 0x3e, 0x72, 0xf6, 0x49, 0xbf, 0x73, 0x1c, 0x20, 0x93, 0x83, 0x63, 0x72, - 0x4a, 0x4c, 0xc9, 0x69, 0x0d, 0x16, 0x43, 0x3c, 0xf2, 0x1d, 0xae, 0xb2, 0x45, 0x53, 0x0c, 0xb4, - 0x5d, 0x48, 0x47, 0x2a, 0x51, 0xff, 0x4b, 0x25, 0x2b, 0x4c, 0x25, 0x4c, 0xc3, 0xd2, 0x60, 0xa6, - 0xba, 0x52, 0x2c, 0x35, 0xc8, 0x44, 0x87, 0x97, 0x54, 0xdb, 0xab, 0x09, 0x76, 0x12, 0xa6, 0xbd, - 0x07, 0xab, 0x51, 0xef, 0x23, 0xf2, 0x84, 
0xe2, 0x0a, 0x91, 0x43, 0xb2, 0x37, 0x25, 0x2b, 0x4b, - 0x1c, 0x40, 0x29, 0x5e, 0xd7, 0x44, 0x56, 0x0d, 0x7e, 0x12, 0x5d, 0x87, 0x0c, 0x71, 0xfb, 0xbe, - 0x4d, 0x47, 0x21, 0x92, 0xca, 0x9b, 0x18, 0x2a, 0x3f, 0x2b, 0xb0, 0x24, 0x94, 0x1c, 0xe3, 0x4d, - 0xb9, 0x98, 0xb7, 0xc4, 0x3c, 0xde, 0x92, 0xaf, 0xcf, 0x5b, 0x15, 0x20, 0x4a, 0x86, 0x14, 0x55, - 0x3d, 0xb9, 0x99, 0xdd, 0xbe, 0x36, 0x3b, 0x91, 0x48, 0xb1, 0xed, 0xf6, 0xe5, 0x46, 0x8d, 0x05, - 0x55, 0x7e, 0x57, 0x20, 0x13, 0xf9, 0xb5, 0x2a, 0xe4, 0xc7, 0x79, 0x59, 0x8f, 0x3c, 0xbb, 0x2f, - 0xb5, 0xb3, 0x3e, 0x37, 0xb9, 0x3b, 0x9e, 0xdd, 0x37, 0xb3, 0x32, 0x1f, 0x36, 0xb8, 0xb8, 0x0f, - 0x89, 0x39, 0x7d, 0x98, 0x6a, 0x7c, 0xf2, 0xf5, 0x1a, 0x3f, 0xd5, 0x22, 0xf5, 0x7c, 0x8b, 0x7e, - 0x4a, 0x40, 0xba, 0xc5, 0xf7, 0x8e, 0xed, 0xfd, 0x1f, 0x3b, 0xe2, 0x1a, 0x64, 0x02, 0xec, 0x59, - 0xc2, 0xa3, 0x72, 0x4f, 0x3a, 0xc0, 0x9e, 0x39, 0xd3, 0xf6, 0xc5, 0x37, 0xb4, 0x5d, 0x96, 0xde, - 0x00, 0x6b, 0xa9, 0xf3, 0xac, 0x85, 0x90, 0x13, 0x54, 0xc8, 0xbb, 0xec, 0x16, 0xe3, 0x80, 0x5f, - 0x8e, 0xca, 0xec, 0xdd, 0x2b, 0xd2, 0x16, 0x48, 0x53, 0xe2, 0x58, 0x84, 0x38, 0xfa, 0xe5, 0x75, - 0x5a, 0x9c, 0x27, 0x4b, 0x53, 0xe2, 0x2a, 0xdf, 0x29, 0x00, 0x7b, 0x8c, 0x59, 0x5e, 0x2f, 0xbb, - 0x85, 0x08, 0x4f, 0xc1, 0x9a, 0x5a, 0xb9, 0x3c, 0xaf, 0x69, 0x72, 0xfd, 0x1c, 0x89, 0xe7, 0x5d, - 0x87, 0xfc, 0x44, 0x8c, 0x04, 0x8d, 0x93, 0xb9, 0x60, 0x92, 0xe8, 0x72, 0x68, 0x23, 0x6a, 0xe6, - 0x0e, 0x63, 0xa3, 0xca, 0x2f, 0x0a, 0x64, 0x78, 0x4e, 0xfb, 0x88, 0xda, 0x53, 0x3d, 0x54, 0x5e, - 0xbf, 0x87, 0xeb, 0x00, 0x62, 0x1a, 0xe2, 0x3e, 0x41, 0x52, 0x59, 0x19, 0x6e, 0x69, 0xbb, 0x4f, - 0x90, 0xf6, 0x51, 0x44, 0x78, 0xf2, 0xdf, 0x09, 0x97, 0x5b, 0x7a, 0x4c, 0xfb, 0x15, 0x48, 0xf9, - 0xa3, 0xa1, 0xc5, 0xae, 0x04, 0x55, 0xa8, 0xd5, 0x1f, 0x0d, 0x3b, 0x47, 0xa4, 0xf2, 0x35, 0xa4, - 0x3a, 0x47, 0xfc, 0x79, 0xc4, 0x24, 0x1a, 0x62, 0x2c, 0xef, 0x64, 0xf1, 0x16, 0x4a, 0x33, 0x03, - 0xbf, 0x82, 0x34, 0x50, 0xd9, 0xe5, 0x3b, 0x7e, 0xac, 0xb1, 0x6f, 0xcd, 0x78, 
0xc5, 0x87, 0x97, - 0x7c, 0x72, 0xdd, 0xfc, 0x55, 0x81, 0x6c, 0xec, 0x7c, 0xd0, 0x3e, 0x80, 0x4b, 0xb5, 0xbd, 0x83, - 0xfa, 0x7d, 0xab, 0xb1, 0x63, 0xdd, 0xd9, 0xab, 0xde, 0xb5, 0x1e, 0x34, 0xef, 0x37, 0x0f, 0xbe, - 0x68, 0x16, 0x16, 0x4a, 0x97, 0x4f, 0x4e, 0x75, 0x2d, 0x86, 0x7d, 0xe0, 0x3f, 0xf6, 0xf1, 0x37, - 0xbe, 0xb6, 0x05, 0x6b, 0xd3, 0x21, 0xd5, 0x5a, 0x7b, 0xb7, 0xd9, 0x29, 0x28, 0xa5, 0x4b, 0x27, - 0xa7, 0xfa, 0x6a, 0x2c, 0xa2, 0xda, 0x25, 0xc8, 0xa7, 0xb3, 0x01, 0xf5, 0x83, 0xfd, 0xfd, 0x46, - 0xa7, 0x90, 0x98, 0x09, 0x90, 0x07, 0xf6, 0x0d, 0x58, 0x9d, 0x0e, 0x68, 0x36, 0xf6, 0x0a, 0xc9, - 0x92, 0x76, 0x72, 0xaa, 0x2f, 0xc7, 0xd0, 0x4d, 0xd7, 0x2b, 0xa5, 0xbf, 0xfd, 0xbe, 0xbc, 0xf0, - 0xe3, 0x0f, 0x65, 0x85, 0x55, 0x96, 0x9f, 0x3a, 0x23, 0xb4, 0xf7, 0xe1, 0x4a, 0xbb, 0x71, 0xb7, - 0xb9, 0xbb, 0x63, 0xed, 0xb7, 0xef, 0x5a, 0x9d, 0x2f, 0x5b, 0xbb, 0xb1, 0xea, 0x56, 0x4e, 0x4e, - 0xf5, 0xac, 0x2c, 0x69, 0x1e, 0xba, 0x65, 0xee, 0x3e, 0x3c, 0xe8, 0xec, 0x16, 0x14, 0x81, 0x6e, - 0x85, 0xe8, 0x10, 0x53, 0xc4, 0xd1, 0xb7, 0xe0, 0xea, 0x05, 0xe8, 0xa8, 0xb0, 0xd5, 0x93, 0x53, - 0x3d, 0xdf, 0x0a, 0x91, 0xd8, 0x3f, 0x3c, 0xc2, 0x80, 0xe2, 0x6c, 0xc4, 0x41, 0xeb, 0xa0, 0x5d, - 0xdd, 0x2b, 0xe8, 0xa5, 0xc2, 0xc9, 0xa9, 0x9e, 0x1b, 0x1f, 0x86, 0x0c, 0x3f, 0xa9, 0xac, 0xf6, - 0xf9, 0xb3, 0xb3, 0xb2, 0xf2, 0xfc, 0xac, 0xac, 0xfc, 0x79, 0x56, 0x56, 0x9e, 0xbe, 0x2c, 0x2f, - 0x3c, 0x7f, 0x59, 0x5e, 0xf8, 0xed, 0x65, 0x79, 0xe1, 0xab, 0x8f, 0xfb, 0x2e, 0x1d, 0x8c, 0xba, - 0x46, 0x0f, 0x0f, 0xb7, 0xe2, 0x7f, 0x09, 0x26, 0x9f, 0xe2, 0xaf, 0xc9, 0xf9, 0xbf, 0x0b, 0xdd, - 0x25, 0x6e, 0xbf, 0xfd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x78, 0x43, 0xdf, 0xef, 0x0c, - 0x00, 0x00, + // 1805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x5b, + 0x15, 0xcf, 0xd8, 0x8e, 0x3f, 0x8e, 0xed, 0xc4, 0x19, 0xd2, 0xd6, 0x71, 0x5b, 0xc7, 0x0c, 0x82, + 0x97, 0xf7, 0x81, 0x53, 0xf2, 0x10, 0x1f, 0x12, 0xa0, 
0x67, 0x27, 0x79, 0x8d, 0x79, 0xb1, 0x63, + 0xc6, 0x7e, 0x45, 0xb0, 0x19, 0x5d, 0x7b, 0x6e, 0x9c, 0x51, 0xc7, 0x33, 0xc3, 0xcc, 0x75, 0xea, + 0xf4, 0x2f, 0x40, 0x59, 0x75, 0xc5, 0x2e, 0x2b, 0x58, 0xb0, 0xe7, 0x1f, 0x40, 0xac, 0xde, 0x06, + 0xa9, 0x3b, 0xd8, 0x50, 0x50, 0x8b, 0x10, 0x7f, 0x06, 0xba, 0xe7, 0xde, 0x19, 0x8f, 0x63, 0xbb, + 0xa0, 0xaa, 0x62, 0x63, 0xcd, 0x3d, 0xe7, 0x77, 0xee, 0x3d, 0xf7, 0x7c, 0x5f, 0xc3, 0x03, 0x46, + 0x1d, 0x93, 0xfa, 0x63, 0xcb, 0x61, 0xfb, 0xec, 0xca, 0xa3, 0x81, 0xf8, 0xad, 0x7b, 0xbe, 0xcb, + 0x5c, 0xb5, 0x34, 0xe3, 0xd6, 0x91, 0x5e, 0xd9, 0x1e, 0xb9, 0x23, 0x17, 0x99, 0xfb, 0xfc, 0x4b, + 0xe0, 0x2a, 0xbb, 0x23, 0xd7, 0x1d, 0xd9, 0x74, 0x1f, 0x57, 0x83, 0xc9, 0xf9, 0x3e, 0xb3, 0xc6, + 0x34, 0x60, 0x64, 0xec, 0x49, 0xc0, 0xc3, 0xd8, 0x31, 0x43, 0xff, 0xca, 0x63, 0x2e, 0xc7, 0xba, + 0xe7, 0x92, 0x5d, 0x8d, 0xb1, 0x2f, 0xa9, 0x1f, 0x58, 0xae, 0x13, 0xd7, 0xa3, 0x52, 0x5b, 0xd0, + 0xf2, 0x92, 0xd8, 0x96, 0x49, 0x98, 0xeb, 0x0b, 0x84, 0xf6, 0x43, 0x28, 0x76, 0x89, 0xcf, 0x7a, + 0x94, 0x9d, 0x50, 0x62, 0x52, 0x5f, 0xdd, 0x86, 0x75, 0xe6, 0x32, 0x62, 0x97, 0x95, 0x9a, 0xb2, + 0x57, 0xd4, 0xc5, 0x42, 0x55, 0x21, 0x75, 0x41, 0x82, 0x8b, 0x72, 0xa2, 0xa6, 0xec, 0x15, 0x74, + 0xfc, 0xd6, 0x2e, 0x20, 0xc5, 0x45, 0xb9, 0x84, 0xe5, 0x98, 0x74, 0x1a, 0x4a, 0xe0, 0x82, 0x53, + 0x07, 0x57, 0x8c, 0x06, 0x52, 0x44, 0x2c, 0xd4, 0xef, 0xc2, 0x3a, 0xea, 0x5f, 0x4e, 0xd6, 0x94, + 0xbd, 0xfc, 0x41, 0xb9, 0x1e, 0x33, 0x94, 0xb8, 0x5f, 0xbd, 0xcb, 0xf9, 0xcd, 0xd4, 0x57, 0xaf, + 0x76, 0xd7, 0x74, 0x01, 0xd6, 0x6c, 0xc8, 0x34, 0x6d, 0x77, 0xf8, 0xb4, 0x75, 0x14, 0x29, 0xa2, + 0xcc, 0x14, 0x51, 0xdb, 0xb0, 0xe9, 0x11, 0x9f, 0x19, 0x01, 0x65, 0xc6, 0x05, 0xde, 0x02, 0x0f, + 0xcd, 0x1f, 0xec, 0xd6, 0x6f, 0xfb, 0xa1, 0x3e, 0x77, 0x59, 0x79, 0x4a, 0xd1, 0x8b, 0x13, 0xb5, + 0x7f, 0xa5, 0x20, 0x2d, 0x8d, 0xf1, 0x63, 0xc8, 0x48, 0xb3, 0xe2, 0x81, 0xf9, 0x83, 0x87, 0xf1, + 0x1d, 0x25, 0xab, 0x7e, 0xe8, 0x3a, 0x01, 0x75, 0x82, 0x49, 0x20, 0xf7, 0x0b, 0x65, 0xd4, 
0x6f, + 0x41, 0x76, 0x78, 0x41, 0x2c, 0xc7, 0xb0, 0x4c, 0xd4, 0x28, 0xd7, 0xcc, 0xbf, 0x7e, 0xb5, 0x9b, + 0x39, 0xe4, 0xb4, 0xd6, 0x91, 0x9e, 0x41, 0x66, 0xcb, 0x54, 0xef, 0x42, 0xfa, 0x82, 0x5a, 0xa3, + 0x0b, 0x86, 0x66, 0x49, 0xea, 0x72, 0xa5, 0xfe, 0x00, 0x52, 0x3c, 0x20, 0xca, 0x29, 0x3c, 0xbb, + 0x52, 0x17, 0xd1, 0x52, 0x0f, 0xa3, 0xa5, 0xde, 0x0f, 0xa3, 0xa5, 0x99, 0xe5, 0x07, 0xbf, 0xf8, + 0xfb, 0xae, 0xa2, 0xa3, 0x84, 0x7a, 0x08, 0x45, 0x9b, 0x04, 0xcc, 0x18, 0x70, 0xb3, 0xf1, 0xe3, + 0xd7, 0x71, 0x8b, 0x9d, 0x45, 0x83, 0x48, 0xc3, 0x4a, 0xd5, 0xf3, 0x5c, 0x4a, 0x90, 0x4c, 0x75, + 0x0f, 0x4a, 0xb8, 0xc9, 0xd0, 0x1d, 0x8f, 0x2d, 0x66, 0xa0, 0xdd, 0xd3, 0x68, 0xf7, 0x0d, 0x4e, + 0x3f, 0x44, 0xf2, 0x09, 0xf7, 0xc0, 0x7d, 0xc8, 0x99, 0x84, 0x11, 0x01, 0xc9, 0x20, 0x24, 0xcb, + 0x09, 0xc8, 0xfc, 0x00, 0x36, 0xa3, 0xa8, 0x0b, 0x04, 0x24, 0x2b, 0x76, 0x99, 0x91, 0x11, 0xf8, + 0x08, 0xb6, 0x1d, 0x3a, 0x65, 0xc6, 0x6d, 0x74, 0x0e, 0xd1, 0x2a, 0xe7, 0x3d, 0x99, 0x97, 0xf8, + 0x26, 0x6c, 0x0c, 0x43, 0xe3, 0x0b, 0x2c, 0x20, 0xb6, 0x18, 0x51, 0x11, 0xb6, 0x03, 0x59, 0xe2, + 0x79, 0x02, 0x90, 0x47, 0x40, 0x86, 0x78, 0x1e, 0xb2, 0x3e, 0x82, 0x2d, 0xbc, 0xa3, 0x4f, 0x83, + 0x89, 0xcd, 0xe4, 0x26, 0x05, 0xc4, 0x6c, 0x72, 0x86, 0x2e, 0xe8, 0x88, 0xfd, 0x06, 0x14, 0xe9, + 0xa5, 0x65, 0x52, 0x67, 0x48, 0x05, 0xae, 0x88, 0xb8, 0x42, 0x48, 0x44, 0xd0, 0x87, 0x50, 0xf2, + 0x7c, 0xd7, 0x73, 0x03, 0xea, 0x1b, 0xc4, 0x34, 0x7d, 0x1a, 0x04, 0xe5, 0x0d, 0xb1, 0x5f, 0x48, + 0x6f, 0x08, 0xb2, 0xf6, 0x4a, 0x81, 0xd4, 0x11, 0x61, 0x44, 0x2d, 0x41, 0x92, 0x4d, 0x83, 0xb2, + 0x52, 0x4b, 0xee, 0x15, 0x74, 0xfe, 0xa9, 0x7e, 0x06, 0xd9, 0x70, 0x57, 0x99, 0x2a, 0xd5, 0x45, + 0xd7, 0x1d, 0x4b, 0xc4, 0xa9, 0x15, 0x30, 0xe9, 0xbf, 0x48, 0x4a, 0xfd, 0x11, 0x64, 0xc7, 0x34, + 0x08, 0xc8, 0x88, 0x06, 0x51, 0xfc, 0x2c, 0xec, 0xd0, 0x96, 0x88, 0x50, 0x3a, 0x94, 0xe0, 0xae, + 0x70, 0x7d, 0x6b, 0x64, 0x39, 0xc4, 0x36, 0x82, 0x5f, 0x4d, 0x88, 0x4f, 0x8d, 0xc0, 0x7a, 0x4e, + 0x31, 0x8c, 0x52, 0xba, 0x1a, 
0xf2, 0x7a, 0xc8, 0xea, 0x59, 0xcf, 0x69, 0x94, 0x98, 0xe9, 0x58, + 0x85, 0x78, 0x91, 0x80, 0x3b, 0x47, 0x13, 0xcf, 0xb6, 0x86, 0x84, 0xd1, 0x27, 0x2e, 0xa3, 0xa1, + 0xc6, 0xea, 0xb7, 0x21, 0x7d, 0xe9, 0x32, 0x6a, 0x10, 0x99, 0x57, 0x77, 0x17, 0x75, 0xe3, 0x78, + 0x7d, 0x9d, 0xa3, 0x1a, 0x11, 0x7c, 0x20, 0x13, 0xfb, 0xad, 0xf0, 0xa6, 0xfa, 0x09, 0xa8, 0x58, + 0xb6, 0x8c, 0x4b, 0x97, 0x59, 0xce, 0xc8, 0xf0, 0xdc, 0x67, 0xd4, 0x97, 0xb9, 0x55, 0x42, 0xce, + 0x13, 0x64, 0x74, 0x39, 0x7d, 0x2e, 0x3e, 0x25, 0x34, 0x85, 0xd0, 0x59, 0x7c, 0x0a, 0x60, 0x13, + 0x72, 0x51, 0x7d, 0x96, 0x09, 0xf5, 0xbf, 0xe5, 0xe4, 0x4c, 0x4c, 0xfb, 0x73, 0x02, 0x76, 0x4e, + 0x79, 0x72, 0x1f, 0xda, 0x16, 0x75, 0x58, 0x83, 0x31, 0x32, 0x7c, 0x1a, 0x99, 0xa5, 0x05, 0x5b, + 0x43, 0xd7, 0x39, 0xb7, 0xad, 0x21, 0xea, 0x8d, 0xd9, 0x2b, 0x2d, 0xf4, 0x60, 0xf1, 0xca, 0xb8, + 0x0f, 0x26, 0xab, 0x5e, 0x8a, 0x89, 0x21, 0x85, 0x07, 0x2b, 0xcf, 0x5b, 0xd7, 0x31, 0x64, 0x69, + 0x49, 0xe0, 0x9d, 0x0a, 0x82, 0x78, 0x22, 0x0a, 0x4c, 0x07, 0xb6, 0x07, 0x57, 0xcf, 0x89, 0xc3, + 0x2c, 0x87, 0xc6, 0xd2, 0xae, 0x9c, 0xac, 0x25, 0xf7, 0xf2, 0x07, 0xf7, 0x97, 0x58, 0x39, 0xc4, + 0xe8, 0x5f, 0x8b, 0x04, 0x67, 0x39, 0xb9, 0xc2, 0xf0, 0xa9, 0x15, 0x86, 0x7f, 0x1f, 0xf6, 0xfc, + 0xa7, 0x02, 0xd9, 0xc8, 0x7c, 0x04, 0xee, 0x99, 0x61, 0xb8, 0x19, 0x18, 0x30, 0x51, 0x12, 0x09, + 0x23, 0x7e, 0xb0, 0x78, 0xa3, 0xa5, 0xf1, 0x79, 0xb2, 0xa6, 0xdf, 0x31, 0x97, 0x06, 0xae, 0x03, + 0x0f, 0x6c, 0x6e, 0x3a, 0x63, 0x88, 0xfe, 0x33, 0x08, 0x3a, 0x70, 0x76, 0x8e, 0x88, 0xcf, 0x8f, + 0x57, 0x38, 0x6b, 0x99, 0xd3, 0x4f, 0xd6, 0xf4, 0x1d, 0x7b, 0x15, 0xb3, 0xb9, 0x0e, 0xc9, 0x60, + 0x32, 0xd6, 0x4e, 0xa1, 0x10, 0xcf, 0x76, 0x9e, 0xdd, 0xb1, 0xab, 0x25, 0x97, 0x67, 0x77, 0xb4, + 0xc9, 0xad, 0xda, 0xa0, 0xfd, 0x14, 0xb2, 0x61, 0xe6, 0xab, 0x3f, 0x81, 0x62, 0x98, 0xf5, 0x86, + 0x6d, 0x05, 0x4c, 0x6e, 0xb7, 0xb3, 0xb2, 0x58, 0xe8, 0x85, 0x10, 0xcf, 0x35, 0xd1, 0x3e, 0x83, + 0x8c, 0x64, 0xa8, 0x5f, 0x87, 0x82, 0x43, 0xc6, 0x34, 0xf0, 0xc8, 
0x90, 0xf2, 0x9e, 0x23, 0x7a, + 0x74, 0x3e, 0xa2, 0xb5, 0x4c, 0x5e, 0x25, 0x78, 0x5f, 0x08, 0xe7, 0x08, 0xfe, 0xad, 0xfd, 0x3b, + 0x01, 0x29, 0x6e, 0x63, 0xf5, 0x53, 0x48, 0xf1, 0x93, 0x50, 0x6e, 0x63, 0x59, 0xf3, 0xee, 0x59, + 0x23, 0x87, 0x9a, 0xed, 0x60, 0xd4, 0xbf, 0xf2, 0xa8, 0x8e, 0xe0, 0x58, 0xef, 0x4c, 0xcc, 0xf5, + 0xce, 0x6d, 0x58, 0xf7, 0xdd, 0x89, 0x63, 0x62, 0xda, 0xaf, 0xeb, 0x62, 0xa1, 0x1e, 0x43, 0x36, + 0x6a, 0x89, 0xa9, 0xff, 0xd6, 0x12, 0x37, 0xb9, 0xd9, 0x78, 0xc3, 0x96, 0x04, 0x3d, 0x33, 0x90, + 0x9d, 0xf1, 0x3d, 0x44, 0xae, 0xfa, 0x31, 0x6c, 0xcd, 0xca, 0x4e, 0xd8, 0x29, 0x44, 0xf5, 0x2c, + 0x45, 0x0c, 0xd9, 0x2a, 0xe6, 0x6b, 0x94, 0x98, 0xb6, 0x32, 0x78, 0xaf, 0x59, 0x8d, 0x6a, 0xe1, + 0xd8, 0xf5, 0x00, 0x72, 0x81, 0x35, 0x72, 0x08, 0x9b, 0xf8, 0x54, 0xb6, 0xd9, 0x19, 0x41, 0xfb, + 0xa3, 0x02, 0x69, 0xd1, 0xb6, 0x63, 0x76, 0x53, 0x96, 0xdb, 0x2d, 0xb1, 0xca, 0x6e, 0xc9, 0x77, + 0xb7, 0x5b, 0x03, 0x20, 0x52, 0x86, 0xb7, 0xa5, 0x15, 0x55, 0x46, 0xa8, 0xd8, 0xb3, 0x46, 0x32, + 0x72, 0x63, 0x42, 0xda, 0xdf, 0x14, 0xc8, 0x45, 0x7c, 0xb5, 0x01, 0xc5, 0x50, 0x2f, 0xe3, 0xdc, + 0x26, 0x23, 0x19, 0x3b, 0x0f, 0x57, 0x2a, 0xf7, 0xb9, 0x4d, 0x46, 0x7a, 0x5e, 0xea, 0xc3, 0x17, + 0xcb, 0xfd, 0x90, 0x58, 0xe1, 0x87, 0x39, 0xc7, 0x27, 0xdf, 0xcd, 0xf1, 0x73, 0x2e, 0x4a, 0xdd, + 0x76, 0xd1, 0x1f, 0x12, 0x90, 0xed, 0xe2, 0xa0, 0x40, 0xec, 0xff, 0x47, 0x46, 0xdc, 0x87, 0x9c, + 0xe7, 0xda, 0x86, 0xe0, 0xa4, 0x90, 0x93, 0xf5, 0x5c, 0x5b, 0x5f, 0x70, 0xfb, 0xfa, 0x7b, 0x4a, + 0x97, 0xf4, 0x7b, 0xb0, 0x5a, 0xe6, 0xb6, 0xd5, 0x7c, 0x28, 0x08, 0x53, 0xc8, 0xc1, 0xfd, 0x11, + 0xb7, 0x01, 0xbe, 0x04, 0x94, 0xc5, 0x87, 0x86, 0x50, 0x5b, 0x20, 0x75, 0x89, 0xe3, 0x12, 0x62, + 0xce, 0x95, 0x25, 0xbc, 0xbc, 0x2a, 0x2c, 0x75, 0x89, 0xd3, 0x7e, 0xa3, 0x00, 0xcc, 0x5a, 0x30, + 0x1f, 0xb9, 0x03, 0x54, 0xc1, 0x98, 0x3b, 0xb9, 0xba, 0xca, 0x69, 0xf2, 0xfc, 0x42, 0x10, 0xd7, + 0xfb, 0x10, 0x8a, 0xb3, 0x60, 0x0c, 0x68, 0xa8, 0x4c, 0xf5, 0x2d, 0x9d, 0xb8, 0x47, 0x99, 0x5e, + 0xb8, 
0x8c, 0xad, 0xb4, 0x3f, 0x29, 0x90, 0x43, 0x9d, 0xda, 0x94, 0x91, 0x39, 0x1f, 0x2a, 0xef, + 0xee, 0xc3, 0x87, 0x00, 0x62, 0x1b, 0x9c, 0x03, 0x45, 0x64, 0xe5, 0x90, 0x82, 0xe3, 0xdf, 0xf7, + 0x22, 0x83, 0x27, 0xdf, 0x6e, 0x70, 0x99, 0xd2, 0xa1, 0xd9, 0xef, 0x41, 0xc6, 0x99, 0x8c, 0x0d, + 0x3e, 0xfe, 0x8a, 0x31, 0x21, 0xed, 0x4c, 0xc6, 0xfd, 0x69, 0xa0, 0x39, 0x90, 0xe9, 0x4f, 0xf1, + 0x2d, 0xc8, 0x43, 0xd4, 0x77, 0x9f, 0x19, 0xbe, 0xeb, 0xb2, 0x70, 0x48, 0xce, 0xfa, 0xee, 0x33, + 0x9d, 0xaf, 0x63, 0x1d, 0x25, 0x19, 0x76, 0x14, 0xf5, 0x00, 0xd2, 0xf8, 0x70, 0x0c, 0x07, 0x99, + 0x25, 0xbd, 0xb1, 0xd3, 0xee, 0xe3, 0xe6, 0xba, 0x44, 0x6a, 0x8f, 0x21, 0xdf, 0x26, 0xb6, 0x4d, + 0x09, 0xa3, 0x66, 0x7f, 0xca, 0xdf, 0x3e, 0xd1, 0x00, 0xcc, 0xa6, 0x46, 0xec, 0xcd, 0xb9, 0x11, + 0xd2, 0xfb, 0x53, 0x1c, 0xf8, 0x37, 0x20, 0xc1, 0xa6, 0xb2, 0x60, 0x24, 0xd8, 0x54, 0xa3, 0x90, + 0x0d, 0x37, 0xe7, 0x29, 0x17, 0x30, 0xe2, 0x8b, 0x1a, 0xbb, 0xae, 0x8b, 0x05, 0x1f, 0xf7, 0x69, + 0x54, 0x60, 0xf9, 0x27, 0xc7, 0x39, 0xae, 0x49, 0x85, 0xbe, 0x05, 0x5d, 0x2c, 0xf8, 0xbd, 0x6d, + 0x4a, 0xce, 0xc5, 0xe1, 0xa2, 0x50, 0x64, 0x39, 0x81, 0x1f, 0xfb, 0xd1, 0x5f, 0x14, 0xc8, 0xc7, + 0x6a, 0x9a, 0xfa, 0x1d, 0xb8, 0xd3, 0x3c, 0x3d, 0x3b, 0xfc, 0xc2, 0x68, 0x1d, 0x19, 0x9f, 0x9f, + 0x36, 0x1e, 0x1b, 0x5f, 0x76, 0xbe, 0xe8, 0x9c, 0xfd, 0xbc, 0x53, 0x5a, 0xab, 0xdc, 0xbd, 0xbe, + 0xa9, 0xa9, 0x31, 0xec, 0x97, 0xce, 0x53, 0xc7, 0x7d, 0xe6, 0xa8, 0xfb, 0xb0, 0x3d, 0x2f, 0xd2, + 0x68, 0xf6, 0x8e, 0x3b, 0xfd, 0x92, 0x52, 0xb9, 0x73, 0x7d, 0x53, 0xdb, 0x8a, 0x49, 0x34, 0x06, + 0x01, 0x75, 0xd8, 0xa2, 0xc0, 0xe1, 0x59, 0xbb, 0xdd, 0xea, 0x97, 0x12, 0x0b, 0x02, 0xb2, 0xc9, + 0x7c, 0x08, 0x5b, 0xf3, 0x02, 0x9d, 0xd6, 0x69, 0x29, 0x59, 0x51, 0xaf, 0x6f, 0x6a, 0x1b, 0x31, + 0x74, 0xc7, 0xb2, 0x2b, 0xd9, 0x5f, 0xff, 0xb6, 0xba, 0xf6, 0xfb, 0xdf, 0x55, 0x15, 0x7e, 0xb3, + 0xe2, 0x5c, 0x5d, 0x53, 0x3f, 0x81, 0x7b, 0xbd, 0xd6, 0xe3, 0xce, 0xf1, 0x91, 0xd1, 0xee, 0x3d, + 0x36, 0xfa, 0xbf, 0xe8, 0x1e, 0xc7, 0x6e, 
0xb7, 0x79, 0x7d, 0x53, 0xcb, 0xcb, 0x2b, 0xad, 0x42, + 0x77, 0xf5, 0xe3, 0x27, 0x67, 0xfd, 0xe3, 0x92, 0x22, 0xd0, 0x5d, 0x9f, 0xf2, 0xe9, 0x10, 0xd1, + 0x8f, 0x60, 0x67, 0x09, 0x3a, 0xba, 0xd8, 0xd6, 0xf5, 0x4d, 0xad, 0xd8, 0xf5, 0xa9, 0xc8, 0x79, + 0x94, 0xa8, 0x43, 0x79, 0x51, 0xe2, 0xac, 0x7b, 0xd6, 0x6b, 0x9c, 0x96, 0x6a, 0x95, 0xd2, 0xf5, + 0x4d, 0xad, 0x10, 0x16, 0x70, 0x8e, 0x9f, 0xdd, 0xac, 0xf9, 0xb3, 0xaf, 0x5e, 0x57, 0x95, 0x97, + 0xaf, 0xab, 0xca, 0x3f, 0x5e, 0x57, 0x95, 0x17, 0x6f, 0xaa, 0x6b, 0x2f, 0xdf, 0x54, 0xd7, 0xfe, + 0xfa, 0xa6, 0xba, 0xf6, 0xcb, 0xef, 0x8f, 0x2c, 0x76, 0x31, 0x19, 0xd4, 0x87, 0xee, 0x78, 0x3f, + 0xfe, 0x9f, 0xcd, 0xec, 0x53, 0xfc, 0x77, 0x74, 0xfb, 0xff, 0x9c, 0x41, 0x1a, 0xe9, 0x9f, 0xfe, + 0x27, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xf1, 0xb9, 0xe5, 0x90, 0x12, 0x00, 0x00, } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { @@ -1401,6 +1990,38 @@ func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x32 + } + if m.OriginalSquareSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.OriginalSquareSize)) + i-- + dAtA[i] = 0x28 + } + { + size, err := m.Messages.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.Txs) > 0 { for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Txs[iNdEx]) @@ -1413,7 +2034,7 @@ func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Vote) Marshal() (dAtA []byte, err error) { +func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1423,72 +2044,62 @@ func (m *Vote) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Vote) MarshalTo(dAtA []byte) (int, error) { +func (m *DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) - i-- - dAtA[i] = 0x42 - } - if m.ValidatorIndex != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) - i-- - dAtA[i] = 0x38 - } - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) - i-- - dAtA[i] = 0x32 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err8 != nil { + return 0, err8 } - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintTypes(dAtA, i, uint64(n6)) + i -= n8 + i = encodeVarintTypes(dAtA, i, uint64(n8)) i-- dAtA[i] = 0x2a - { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.ValidatorPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorPower)) + i-- + dAtA[i] = 0x20 } - i-- - dAtA[i] = 0x22 - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) i-- dAtA[i] = 0x18 } - if m.Height != 0 { 
- i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + if m.VoteB != nil { + { + size, err := m.VoteB.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + if m.VoteA != nil { + { + size, err := m.VoteA.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Commit) Marshal() (dAtA []byte, err error) { +func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1498,20 +2109,33 @@ func (m *Commit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Commit) MarshalTo(dAtA []byte) (int, error) { +func (m *LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Signatures) > 0 { - for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x2a + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x20 + } + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := 
m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1519,33 +2143,30 @@ func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x1a } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + if m.CommonHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CommonHeight)) i-- dAtA[i] = 0x10 } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + if m.ConflictingBlock != nil { + { + size, err := m.ConflictingBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *CommitSig) Marshal() (dAtA []byte, err error) { +func (m *Evidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1555,47 +2176,71 @@ func (m *CommitSig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { +func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) - i-- - dAtA[i] = 0x22 - } - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err9 != nil { - return 0, err9 + if m.Sum != nil 
{ + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - i -= n9 - i = encodeVarintTypes(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0x1a - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + return len(dAtA) - i, nil +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DuplicateVoteEvidence != nil { + { + size, err := m.DuplicateVoteEvidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } - if m.BlockIdFlag != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + return len(dAtA) - i, nil +} +func (m *Evidence_LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LightClientAttackEvidence != nil { + { + size, err := m.LightClientAttackEvidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 } return len(dAtA) - i, nil } - -func (m *Proposal) Marshal() (dAtA []byte, err error) { +func (m *EvidenceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1605,65 +2250,34 @@ func (m *Proposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { +func (m *EvidenceList) MarshalTo(dAtA []byte) (int, 
error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EvidenceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) - i-- - dAtA[i] = 0x3a - } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintTypes(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x32 - { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Evidence) > 0 { + for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Evidence[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if m.PolRound != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) - i-- - dAtA[i] = 0x20 - } - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) - i-- - dAtA[i] = 0x18 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x10 - } - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *SignedHeader) Marshal() (dAtA []byte, err error) { +func (m *Messages) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1673,44 +2287,34 @@ func (m *SignedHeader) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { +func (m *Messages) MarshalTo(dAtA []byte) (int, error) { 
size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Messages) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Commit != nil { - { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.MessagesList) > 0 { + for iNdEx := len(m.MessagesList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MessagesList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *LightBlock) Marshal() (dAtA []byte, err error) { +func (m *Message) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1720,44 +2324,34 @@ func (m *LightBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Message) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ValidatorSet != nil { - { - size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) i-- dAtA[i] = 0x12 } - if m.SignedHeader != nil { - { 
- size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.NamespaceId) > 0 { + i -= len(m.NamespaceId) + copy(dAtA[i:], m.NamespaceId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NamespaceId))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BlockMeta) Marshal() (dAtA []byte, err error) { +func (m *Vote) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1767,23 +2361,45 @@ func (m *BlockMeta) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { +func (m *Vote) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.NumTxs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x42 } + if m.ValidatorIndex != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x32 + } + n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err15 != nil { + return 0, err15 + } + i -= n15 + i = encodeVarintTypes(dAtA, i, uint64(n15)) + i-- + dAtA[i] = 0x2a { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1791,12 
+2407,59 @@ func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - if m.BlockSize != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) + dAtA[i] = 0x22 + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- dAtA[i] = 0x10 } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Commit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signatures) > 0 { + for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } { size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1806,11 +2469,21 @@ func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } -func (m *TxProof) Marshal() (dAtA []byte, err error) { +func (m *CommitSig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1820,366 
+2493,1273 @@ func (m *TxProof) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { +func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err18 != nil { + return 0, err18 + } + i -= n18 + i = encodeVarintTypes(dAtA, i, uint64(n18)) + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) i-- dAtA[i] = 0x12 } - if len(m.RootHash) > 0 { - i -= len(m.RootHash) - copy(dAtA[i:], m.RootHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *Proposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *PartSetHeader) Size() (n int) { - if m == nil { - return 0 - } + +func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Total != 0 { - n += 1 + sovTypes(uint64(m.Total)) + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x3a } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err19 != nil { + return 0, err19 } - return n -} - -func (m *Part) Size() (n int) { - if m == nil { - return 0 + i -= n19 + i = encodeVarintTypes(dAtA, i, uint64(n19)) + i-- + dAtA[i] = 0x32 + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - var l int - _ = l - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) + i-- + dAtA[i] = 0x2a + if m.PolRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) + i-- + dAtA[i] = 0x20 } - l = len(m.Bytes) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 } - l = m.Proof.Size() - n += 1 + l + sovTypes(uint64(l)) - return n -} - -func (m *BlockID) Size() (n int) { - if m == nil { - return 0 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + 
dAtA[i] = 0x8 } - l = m.PartSetHeader.Size() - n += 1 + l + sovTypes(uint64(l)) - return n + return len(dAtA) - i, nil } -func (m *Header) Size() (n int) { - if m == nil { - return 0 +func (m *SignedHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.Version.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ChainID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) - n += 1 + l + sovTypes(uint64(l)) - l = m.LastBlockId.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.LastCommitHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.DataHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.ValidatorsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.NextValidatorsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.ConsensusHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.LastResultsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.EvidenceHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - l = len(m.ProposerAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Data) Size() (n int) { - if m == nil { - return 0 +func (m *LightBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Txs) > 0 { - for _, b := range m.Txs { - l = len(b) - n += 1 + l + sovTypes(uint64(l)) + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SignedHeader != nil { + { + size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Vote) Size() (n int) { - if m == nil { - return 0 +func (m *BlockMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) + if m.NumTxs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) + i-- + dAtA[i] = 0x20 } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + { + 
size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) + i-- + dAtA[i] = 0x1a + if m.BlockSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) + i-- + dAtA[i] = 0x10 } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ValidatorAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - if m.ValidatorIndex != 0 { - n += 1 + sovTypes(uint64(m.ValidatorIndex)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TxProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + return dAtA[:n], nil +} + +func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Proofs) > 0 { + for iNdEx := len(m.Proofs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Proofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - return n + if len(m.Data) > 0 { + for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Data[iNdEx]) + copy(dAtA[i:], m.Data[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.RowRoots) > 0 { + for iNdEx := len(m.RowRoots) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.RowRoots[iNdEx]) + copy(dAtA[i:], m.RowRoots[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RowRoots[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Commit) Size() (n int) { - if m == nil { - return 0 +func (m *MalleatedTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *MalleatedTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MalleatedTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x12 } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) + if len(m.OriginalTxHash) > 0 { + i -= len(m.OriginalTxHash) + copy(dAtA[i:], m.OriginalTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.OriginalTxHash))) + i-- + dAtA[i] = 0xa } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.Signatures) > 0 { - for _, e := range m.Signatures { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *NMTProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NMTProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NMTProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LeafHash) > 0 { + i -= len(m.LeafHash) + copy(dAtA[i:], m.LeafHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LeafHash))) + i-- + dAtA[i] = 0x22 + } + if len(m.Nodes) > 
0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nodes[iNdEx]) + copy(dAtA[i:], m.Nodes[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Nodes[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return n + if m.End != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *CommitSig) Size() (n int) { +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartSetHeader) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.BlockIdFlag != 0 { - n += 1 + sovTypes(uint64(m.BlockIdFlag)) - } - l = len(m.ValidatorAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Total != 0 { + n += 1 + sovTypes(uint64(m.Total)) } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) + l = len(m.Hash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *Proposal) Size() (n int) { +func (m *Part) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) - } - if m.PolRound != 0 { - n += 1 + sovTypes(uint64(m.PolRound)) + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) + l = len(m.Bytes) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) return n } -func (m *SignedHeader) Size() (n int) { +func (m *BlockID) Size() (n int) 
{ if m == nil { return 0 } var l int _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.Commit != nil { - l = m.Commit.Size() + l = len(m.Hash) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = m.PartSetHeader.Size() + n += 1 + l + sovTypes(uint64(l)) return n } -func (m *LightBlock) Size() (n int) { +func (m *Header) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.SignedHeader != nil { - l = m.SignedHeader.Size() + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.ValidatorSet != nil { - l = m.ValidatorSet.Size() + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = m.LastBlockId.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.LastCommitHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConsensusHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EvidenceHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *BlockMeta) Size() (n int) { +func (m *Data) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.BlockSize != 0 { - n += 1 + sovTypes(uint64(m.BlockSize)) + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } } - l = m.Header.Size() + l = m.Evidence.Size() n += 1 + 
l + sovTypes(uint64(l)) - if m.NumTxs != 0 { - n += 1 + sovTypes(uint64(m.NumTxs)) + l = m.Messages.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.OriginalSquareSize != 0 { + n += 1 + sovTypes(uint64(m.OriginalSquareSize)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *TxProof) Size() (n int) { +func (m *DuplicateVoteEvidence) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.RootHash) - if l > 0 { + if m.VoteA != nil { + l = m.VoteA.Size() n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Data) - if l > 0 { + if m.VoteB != nil { + l = m.VoteB.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.Proof != nil { - l = m.Proof.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) + } + if m.ValidatorPower != 0 { + n += 1 + sovTypes(uint64(m.ValidatorPower)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConflictingBlock != nil { + l = m.ConflictingBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.CommonHeight != 0 { + n += 1 + sovTypes(uint64(m.CommonHeight)) + } + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Evidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Evidence_DuplicateVoteEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DuplicateVoteEvidence != nil { + l = m.DuplicateVoteEvidence.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} +func (m *Evidence_LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightClientAttackEvidence != nil { + l = m.LightClientAttackEvidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *EvidenceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Evidence) > 0 { + for _, e := range m.Evidence { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Messages) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessagesList) > 0 { + for _, e := range m.MessagesList { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamespaceId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorIndex != 0 { + n += 1 + sovTypes(uint64(m.ValidatorIndex)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Signatures) > 0 { + for _, e := range m.Signatures 
{ + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *CommitSig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.PolRound != 0 { + n += 1 + sovTypes(uint64(m.PolRound)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *LightBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedHeader != nil { + l = m.SignedHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *BlockMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockSize != 0 { + n += 1 + sovTypes(uint64(m.BlockSize)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.NumTxs != 0 { + n += 1 + 
sovTypes(uint64(m.NumTxs)) + } + return n +} + +func (m *TxProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowRoots) > 0 { + for _, b := range m.RowRoots { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Data) > 0 { + for _, b := range m.Data { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Proofs) > 0 { + for _, e := range m.Proofs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *MalleatedTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OriginalTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *NMTProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sovTypes(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovTypes(uint64(m.End)) + } + if len(m.Nodes) > 0 { + for _, b := range m.Nodes { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.LeafHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Part) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Part: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Part: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Bytes == nil { + m.Bytes = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - return n -} -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *PartSetHeader) Unmarshal(dAtA []byte) error { +func (m *Header) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2189,30 +3769,248 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowTypes } - if iNdEx >= l { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= 
int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastCommitHash == nil { + m.LastCommitHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) } - m.Total = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2222,14 +4020,29 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Total |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorsHash == nil { + m.ValidatorsHash = []byte{} + } + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2256,66 +4069,84 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Part) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ConsensusHash == nil { + m.ConsensusHash = []byte{} } - if iNdEx >= l { + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Part: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) } - m.Index = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2325,14 +4156,29 @@ func (m *Part) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2359,16 +4205,16 @@ func (m *Part) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) - if m.Bytes == nil { - m.Bytes = []byte{} + m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) + if m.EvidenceHash == nil { + m.EvidenceHash = []byte{} } iNdEx = postIndex - case 3: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2378,23 +4224,24 @@ func (m *Part) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} } iNdEx = postIndex default: @@ -2418,7 +4265,7 @@ func (m *Part) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockID) Unmarshal(dAtA []byte) error { +func (m *Data) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2431,27 +4278,59 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlockID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Data: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2461,29 +4340,28 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2510,10 +4388,63 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Messages.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalSquareSize", wireType) + } + m.OriginalSquareSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OriginalSquareSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -2535,7 +4466,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } return nil } -func (m *Header) Unmarshal(dAtA []byte) error { +func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2558,15 +4489,15 @@ func (m *Header) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Header: wiretype end group for non-group") + return fmt.Errorf("proto: DuplicateVoteEvidence: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DuplicateVoteEvidence: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteA", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2593,15 +4524,18 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.VoteA == nil { + m.VoteA = &Vote{} + } + if err := m.VoteA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + return fmt.Errorf("proto: wrong wireType = %d 
for field VoteB", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2611,29 +4545,33 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChainID = string(dAtA[iNdEx:postIndex]) + if m.VoteB == nil { + m.VoteB = &Vote{} + } + if err := m.VoteB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) } - m.Height = 0 + m.TotalVotingPower = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2643,16 +4581,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.TotalVotingPower |= int64(b&0x7F) << shift if b < 0x80 { break } } case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorPower", wireType) } - var msglen int + m.ValidatorPower = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2662,28 +4600,14 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ValidatorPower |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2710,15 +4634,65 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightClientAttackEvidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightClientAttackEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field ConflictingBlock", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2728,31 +4702,33 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastCommitHash == nil { - m.LastCommitHash = []byte{} + if m.ConflictingBlock == nil { + m.ConflictingBlock = &LightBlock{} + } + if err := m.ConflictingBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonHeight", wireType) } - var byteLen int + m.CommonHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2762,31 +4738,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.CommonHeight |= int64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.DataHash == nil { - m.DataHash = []byte{} - } - iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2796,31 +4757,31 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) - if m.ValidatorsHash == nil { - m.ValidatorsHash = []byte{} + m.ByzantineValidators = append(m.ByzantineValidators, &Validator{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) } - var byteLen int + m.TotalVotingPower = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2830,31 +4791,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.TotalVotingPower |= int64(b&0x7F) << shift if b < 0x80 { break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NextValidatorsHash = append(m.NextValidatorsHash[:0], 
dAtA[iNdEx:postIndex]...) - if m.NextValidatorsHash == nil { - m.NextValidatorsHash = []byte{} + } } - iNdEx = postIndex - case 10: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2864,31 +4810,80 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) - if m.ConsensusHash == nil { - m.ConsensusHash = []byte{} + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 11: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Evidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Evidence: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DuplicateVoteEvidence", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2898,31 +4893,32 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) - if m.AppHash == nil { - m.AppHash = []byte{} + v := &DuplicateVoteEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Sum = &Evidence_DuplicateVoteEvidence{v} iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LightClientAttackEvidence", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2932,31 +4928,82 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.LastResultsHash == nil { - m.LastResultsHash = []byte{} + v := &LightClientAttackEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Sum = &Evidence_LightClientAttackEvidence{v} iNdEx = postIndex - case 13: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvidenceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvidenceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvidenceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2966,31 +5013,81 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - 
m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) - if m.EvidenceHash == nil { - m.EvidenceHash = []byte{} + m.Evidence = append(m.Evidence, Evidence{}) + if err := m.Evidence[len(m.Evidence)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Messages) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Messages: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Messages: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MessagesList", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3000,24 +5097,24 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ProposerAddress == nil { - m.ProposerAddress = []byte{} + m.MessagesList = append(m.MessagesList, &Message{}) + if err := m.MessagesList[len(m.MessagesList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -3041,7 +5138,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } return nil } -func (m *Data) Unmarshal(dAtA []byte) error { +func (m *Message) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3064,15 +5161,15 @@ func (m *Data) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Data: wiretype end group for non-group") + return fmt.Errorf("proto: Message: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3099,8 +5196,44 @@ func (m *Data) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) - copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + m.NamespaceId = append(m.NamespaceId[:0], dAtA[iNdEx:postIndex]...) 
+ if m.NamespaceId == nil { + m.NamespaceId = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -4363,7 +6496,7 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowRoots", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4390,10 +6523,8 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) - if m.RootHash == nil { - m.RootHash = []byte{} - } + m.RowRoots = append(m.RowRoots, make([]byte, postIndex-iNdEx)) + copy(m.RowRoots[len(m.RowRoots)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -4424,14 +6555,12 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } + m.Data = append(m.Data, make([]byte, postIndex-iNdEx)) + copy(m.Data[len(m.Data)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proofs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4458,12 +6587,282 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Proof == nil { - m.Proof = &crypto.Proof{} + m.Proofs = append(m.Proofs, &NMTProof{}) + if err := m.Proofs[len(m.Proofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MalleatedTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MalleatedTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MalleatedTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalTxHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalTxHash = append(m.OriginalTxHash[:0], dAtA[iNdEx:postIndex]...) + if m.OriginalTxHash == nil { + m.OriginalTxHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NMTProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NMTProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NMTProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, make([]byte, postIndex-iNdEx)) + copy(m.Nodes[len(m.Nodes)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) + if m.LeafHash == nil { + m.LeafHash = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 8d4f009729..861e6515d9 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -87,6 +87,51 @@ message Data { // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. repeated bytes txs = 1; + + // field number 2 is reserved for intermediate state roots + EvidenceList evidence = 3 [(gogoproto.nullable) = false]; + Messages messages = 4 [(gogoproto.nullable) = false]; + uint64 original_square_size = 5; + bytes hash = 6; +} + +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. 
+message DuplicateVoteEvidence { + tendermint.types.Vote vote_a = 1; + tendermint.types.Vote vote_b = 2; + int64 total_voting_power = 3; + int64 validator_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. +message LightClientAttackEvidence { + tendermint.types.LightBlock conflicting_block = 1; + int64 common_height = 2; + repeated tendermint.types.Validator byzantine_validators = 3; + int64 total_voting_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; +} + +message Evidence { + oneof sum { + DuplicateVoteEvidence duplicate_vote_evidence = 1; + LightClientAttackEvidence light_client_attack_evidence = 2; + } +} + +// EvidenceData contains any evidence of malicious wrong-doing by validators +message EvidenceList { + repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; +} + +message Messages { + repeated Message messages_list = 1; +} + +message Message { + bytes namespace_id = 1; + bytes data = 2; } // Vote represents a prevote, precommit, or commit vote from validators for @@ -151,7 +196,37 @@ message BlockMeta { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. message TxProof { - bytes root_hash = 1; - bytes data = 2; - tendermint.crypto.Proof proof = 3; + repeated bytes row_roots = 1; + repeated bytes data = 2; + repeated NMTProof proofs = 3; +} + +// MalleatedTx wraps a transaction that was derived from a different original +// transaction. This allows for tendermint to track malleated and original +// transactions +message MalleatedTx { + bytes original_tx_hash = 1; + bytes tx = 2; } + +// Proof represents proof of a namespace.ID in an NMT. 
+// In case this proof proves the absence of a namespace.ID +// in a tree it also contains the leaf hashes of the range +// where that namespace would be. +message NMTProof { + // start index of this proof. + int32 start = 1; + // end index of this proof. + int32 end = 2; + // Nodes that together with the corresponding leaf values can be used to + // recompute the root and verify this proof. Nodes should consist of the max + // and min namespaces along with the actual hash, resulting in each being 48 + // bytes each + repeated bytes nodes = 3; + // leafHash are nil if the namespace is present in the NMT. In case the + // namespace to be proved is in the min/max range of the tree but absent, this + // will contain the leaf hash necessary to verify the proof of absence. Leaf + // hashes should consist of the namespace along with the actual hash, + // resulting 40 bytes total. + bytes leaf_hash = 4; +} \ No newline at end of file diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 6c518e8994..393ac89c4f 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -489,6 +489,20 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.R return result, nil } +func (c *baseRPCClient) DataCommitment(ctx context.Context, query string) (*coretypes.ResultDataCommitment, error) { + result := new(coretypes.ResultDataCommitment) + params := map[string]interface{}{ + "query": query, + } + + _, err := c.caller.Call(ctx, "data_commitment", params, result) + if err != nil { + return nil, err + } + + return result, nil +} + func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { result := new(coretypes.ResultTx) params := map[string]interface{}{ diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 5f6d9adddf..c55c4d1203 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -81,6 +81,7 @@ type SignClient interface { Header(ctx context.Context, 
height *int64) (*coretypes.ResultHeader, error) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) + DataCommitment(ctx context.Context, query string) (*coretypes.ResultDataCommitment, error) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 95428a93d7..18cce5ba7c 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -186,6 +186,10 @@ func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretyp return c.env.HeaderByHash(c.ctx, hash) } +func (c *Local) DataCommitment(_ context.Context, query string) (*coretypes.ResultDataCommitment, error) { + return c.env.DataCommitment(c.ctx, query) +} + func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { return c.env.Commit(c.ctx, height) } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index c550c01c0d..7aa550a687 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -165,6 +165,10 @@ func (c Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCom return c.env.Commit(&rpctypes.Context{}, height) } +func (c Client) DataCommitment(ctx context.Context, query string) (*coretypes.ResultDataCommitment, error) { + return c.env.DataCommitment(&rpctypes.Context{}, query) +} + func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 0a83ef201e..2725a5fa7c 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -12,6 +12,8 @@ import ( mock 
"github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -388,6 +390,29 @@ func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensu return r0, r1 } +// DataCommitment provides a mock function with given fields: ctx, query +func (_m *Client) DataCommitment(ctx context.Context, query string) (*coretypes.ResultDataCommitment, error) { + ret := _m.Called(ctx, query) + + var r0 *coretypes.ResultDataCommitment + if rf, ok := ret.Get(0).(func(context.Context, string) *coretypes.ResultDataCommitment); ok { + r0 = rf(ctx, query) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultDataCommitment) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // DumpConsensusState provides a mock function with given fields: _a0 func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { ret := _m.Called(_a0) @@ -800,3 +825,13 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } + +// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewClient(t testing.TB) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 5c980bdd68..9762605947 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -617,8 +617,8 @@ func TestTx(t *testing.T) { // time to verify the proof proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + if tc.prove && assert.True(t, proof.IncludesTx(ptx.Tx), i) { + assert.True(t, proof.VerifyProof(), i) } } } @@ -680,10 +680,9 @@ func TestTxSearch(t *testing.T) { assert.EqualValues(t, find.Hash, ptx.Hash) // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + if assert.True(t, ptx.Proof.IncludesTx(find.Tx)) { + assert.True(t, ptx.Proof.VerifyProof()) } - // query by height result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") require.Nil(t, err) @@ -766,6 +765,34 @@ func TestTxSearch(t *testing.T) { } } +func TestDataCommitment(t *testing.T) { + _, conf := NodeSuite(t) + c := getHTTPClient(t, conf) + + // first we broadcast a few tx + expectedHeight := int64(3) + var bres *coretypes.ResultBroadcastTxCommit + var err error + for i := int64(0); i < expectedHeight; i++ { + _, _, tx := MakeTxKV() + bres, err = c.BroadcastTxCommit(context.Background(), tx) + require.Nil(t, err, "%+v when submitting tx %d", err, i) + } + + // check if height >= 3 + actualHeight := bres.Height + require.LessOrEqual(t, expectedHeight, actualHeight, "couldn't create enough blocks for testing the commitment.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // check if data commitment is not nil. + // Checking if the commitment is correct is done in `core/blocks_test.go`. 
+ dataCommitment, err := c.DataCommitment(ctx, fmt.Sprintf("block.height <= %d", expectedHeight)) + require.NotNil(t, dataCommitment, "data commitment shouldn't be nul.") + require.Nil(t, err, "%+v when creating data commitment.", err) +} + func TestBatchedJSONRPCCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index 223a25ff78..a9755a0f3a 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -73,6 +73,10 @@ type ResultBlockResults struct { ConsensusParamUpdates *tmproto.ConsensusParams `json:"consensus_param_updates"` } +type ResultDataCommitment struct { + DataCommitment bytes.HexBytes `json:"data_commitment"` +} + // NewResultCommit is a helper to initialize the ResultCommit with // the embedded struct func NewResultCommit(header *types.Header, commit *types.Commit, diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index bcb5739ddc..a60bb07ca7 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -1225,7 +1225,37 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /data_commitment: + get: + summary: Generates a data commitment for a range of blocks + description: | + Generates a data commitment over an ordered list of blocks matching the query. + See /subscribe for the query syntax. + operationId: data_commitment + parameters: + - in: query + name: query + description: Query + required: true + schema: + type: string + example: "block.height > 1000" + tags: + - Info + responses: + "200": + description: Hex representation of the data commitment. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/DataCommitmentResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" /tx: get: summary: Get transactions by hash @@ -2626,6 +2656,28 @@ components: example: "2" type: object + DataCommitmentResponse: + type: object + required: + - "jsonrpc" + - "id" + - "result" + properties: + jsonrpc: + type: string + example: "2.0" + id: + type: integer + example: 0 + result: + required: + - "dataCommitment" + properties: + dataCommitment: + type: string + example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + type: object + TxResponse: type: object required: diff --git a/test/Makefile b/test/Makefile index 86226cf03a..bc788bc97d 100644 --- a/test/Makefile +++ b/test/Makefile @@ -5,6 +5,8 @@ BINDIR ?= $(GOPATH)/bin +PACKAGES=$(shell go list ./...) + ## required to be run first by most tests build_docker_test_image: docker build -t tester -f ./test/docker/Dockerfile . diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 9110b4bf57..6d472db4cc 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.16 +FROM golang:1.17 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 5a782fa337..173ca33d2c 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -271,6 +271,17 @@ func (app *Application) Rollback() error { return app.state.Rollback() } +func (app *Application) PrepareProposal( + req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { + return abci.ResponsePrepareProposal{BlockData: req.BlockData} +} + +func (app *Application) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal { + return abci.ResponseProcessProposal{ + Result: abci.ResponseProcessProposal_ACCEPT, + } +} + // validatorUpdates generates a validator set update. 
func (app *Application) validatorUpdates(height uint64) (abci.ValidatorUpdates, error) { updates := app.cfg.ValidatorUpdates[fmt.Sprintf("%v", height)] diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 260df23f3a..4e19fe9f8e 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.16 +FROM golang:1.17 RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index ccb3f6c510..7d89f80c8f 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -88,7 +88,7 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat return nil, nil } - ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) + ctx, cancel := context.WithTimeout(ctx, 7*time.Minute) defer cancel() status, err := waitForNode(ctx, node, 0) if err != nil { diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index ad5fa7a64d..14c3f2e597 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -138,7 +138,7 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R return nil, err } - timer := time.NewTimer(0) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() var ( @@ -171,7 +171,7 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R return nil, err case err == nil && status.SyncInfo.LatestBlockHeight >= height: return status, nil - case counter%500 == 0: + case counter%1000 == 0: switch { case err != nil: lastFailed = true @@ -190,7 +190,7 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R ) } } - timer.Reset(250 * time.Millisecond) + 
timer.Reset(350 * time.Millisecond) } } } diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index 967d2519cf..ee43d36fd7 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -109,7 +109,7 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { return err } - wctx, wcancel := context.WithTimeout(ctx, 8*time.Minute) + wctx, wcancel := context.WithTimeout(ctx, 12*time.Minute) status, err := waitForNode(wctx, node, node.StartAt) if err != nil { wcancel() diff --git a/types/block.go b/types/block.go index 2f444be748..6143130f11 100644 --- a/types/block.go +++ b/types/block.go @@ -4,19 +4,25 @@ import ( "bytes" "errors" "fmt" + "math" + "sort" "strings" "time" + "github.com/celestiaorg/nmt/namespace" "github.com/gogo/protobuf/proto" gogotypes "github.com/gogo/protobuf/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/pkg/consts" + "github.com/tendermint/tendermint/pkg/da" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -34,8 +40,14 @@ const ( // Uvarint length of MaxBlockSizeBytes: 4 bytes // 2 fields (2 embedded): 2 bytes // Uvarint length of Data.Txs: 4 bytes - // Data.Txs field: 1 byte - MaxOverheadForBlock int64 = 11 + // Data fields: 6 bytes + MaxOverheadForBlock int64 = 16 + + // MinimumDataBytes is the numbers of bytes that will be taken up by empty + // block data + // Hash in Data 32 bytes + // OriginalSquareSize in Data 8 bytes + MinimumDataBytes int64 = 40 ) // Block defines the atomic unit of a Tendermint blockchain. 
@@ -44,8 +56,7 @@ type Block struct { Header `json:"header"` Data `json:"data"` - Evidence EvidenceData `json:"evidence"` - LastCommit *Commit `json:"last_commit"` + LastCommit *Commit `json:"last_commit"` } // ValidateBasic performs basic validation that doesn't involve state data. @@ -217,12 +228,6 @@ func (b *Block) ToProto() (*tmproto.Block, error) { pb.LastCommit = b.LastCommit.ToProto() pb.Data = b.Data.ToProto() - protoEvidence, err := b.Evidence.ToProto() - if err != nil { - return nil, err - } - pb.Evidence = *protoEvidence - return pb, nil } @@ -244,9 +249,6 @@ func BlockFromProto(bp *tmproto.Block) (*Block, error) { return nil, err } b.Data = data - if err := b.Evidence.FromProto(&bp.Evidence); err != nil { - return nil, err - } if bp.LastCommit != nil { lc, err := CommitFromProto(bp.LastCommit) @@ -267,6 +269,7 @@ func BlockFromProto(bp *tmproto.Block) (*Block, error) { func MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 { maxDataBytes := maxBytes - MaxOverheadForBlock - + MinimumDataBytes - MaxHeaderBytes - MaxCommitBytes(valsCount) - evidenceBytes @@ -290,6 +293,7 @@ func MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 { func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { maxDataBytes := maxBytes - MaxOverheadForBlock - + MinimumDataBytes - MaxHeaderBytes - MaxCommitBytes(valsCount) @@ -307,16 +311,20 @@ func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { // MakeBlock returns a new block with an empty header, except what can be // computed from itself. // It populates the same set of fields validated by ValidateBasic. 
-func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { +func MakeBlock( + height int64, + txs []Tx, evidence []Evidence, messages []Message, + lastCommit *Commit) *Block { block := &Block{ Header: Header{ Version: version.Consensus{Block: version.BlockProtocol, App: 0}, Height: height, }, Data: Data{ - Txs: txs, + Txs: txs, + Evidence: EvidenceData{Evidence: evidence}, + Messages: Messages{MessagesList: messages}, }, - Evidence: EvidenceData{Evidence: evidence}, LastCommit: lastCommit, } block.fillHeader() @@ -997,29 +1005,214 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { //----------------------------------------------------------------------------- -// Data contains the set of transactions included in the block +// Data contains all the available Data of the block. +// Data with reserved namespaces (Txs, Evidence) and +// Celestia application specific Messages. type Data struct { - // Txs that will be applied by state @ block.Height+1. // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. Txs Txs `json:"txs"` + Evidence EvidenceData `json:"evidence"` + + // The messages included in this block. + Messages Messages `json:"msgs"` + + // OriginalSquareSize is the size of the square after splitting all the block data + // into shares. 
The erasure data is discarded after generation, and keeping this + // value avoids unnecessarily regenerating all of the shares when returning + // proofs that some element was included in the block + OriginalSquareSize uint64 `json:"square_size"` + // Volatile hash tmbytes.HexBytes } // Hash returns the hash of the data func (data *Data) Hash() tmbytes.HexBytes { - if data == nil { - return (Txs{}).Hash() + if data.hash != nil { + return data.hash } - if data.hash == nil { - data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs + + // compute the data availability header + // todo(evan): add the non redundant shares back into the header + shares, _, err := data.ComputeShares(data.OriginalSquareSize) + if err != nil { + // todo(evan): see if we can get rid of this panic + panic(err) } + rawShares := shares.RawShares() + + eds, err := da.ExtendShares(data.OriginalSquareSize, rawShares) + if err != nil { + panic(err) + } + + dah := da.NewDataAvailabilityHeader(eds) + + data.hash = dah.Hash() + return data.hash } +// ComputeShares splits block data into shares of an original data square and +// returns them along with an amount of non-redundant shares. If a square size +// of 0 is passed, then it is determined based on how many shares are needed to +// fill the square for the underlying block data. The square size is stored in +// the local instance of the struct. +func (data *Data) ComputeShares(squareSize uint64) (NamespacedShares, int, error) { + if squareSize != 0 { + if !powerOf2(squareSize) { + return nil, 0, errors.New("square size is not a power of two") + } + } + + // reserved shares: + txShares := data.Txs.SplitIntoShares() + evidenceShares := data.Evidence.SplitIntoShares() + + // application data shares from messages: + msgShares := data.Messages.SplitIntoShares() + curLen := len(txShares) + len(evidenceShares) + len(msgShares) + + if curLen > consts.MaxShareCount { + panic(fmt.Sprintf("Block data exceeds the max square size. 
Number of shares required: %d\n", curLen)) + } + + // find the number of shares needed to create a square that has a power of + // two width + wantLen := int(squareSize * squareSize) + if squareSize == 0 { + wantLen = paddedLen(curLen) + } + + if wantLen < curLen { + return nil, 0, errors.New("square size too small to fit block data") + } + + // ensure that the min square size is used + if wantLen < consts.MinSharecount { + wantLen = consts.MinSharecount + } + + tailShares := TailPaddingShares(wantLen - curLen) + + shares := append(append(append( + txShares, + evidenceShares...), + msgShares...), + tailShares...) + + if squareSize == 0 { + squareSize = uint64(math.Sqrt(float64(wantLen))) + } + + data.OriginalSquareSize = squareSize + + return shares, curLen, nil +} + +// paddedLen calculates the number of shares needed to make a power of 2 square +// given the current number of shares +func paddedLen(length int) int { + width := uint32(math.Ceil(math.Sqrt(float64(length)))) + width = nextHighestPowerOf2(width) + return int(width * width) +} + +// nextPowerOf2 returns the next highest power of 2 unless the input is a power +// of two, in which case it returns the input +func nextHighestPowerOf2(v uint32) uint32 { + if v == 0 { + return 0 + } + + // find the next highest power using bit mashing + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + // return the next highest power + return v +} + +// powerOf2 checks if number is power of 2 +func powerOf2(v uint64) bool { + if v&(v-1) == 0 && v != 0 { + return true + } + return false +} + +type Messages struct { + MessagesList []Message `json:"msgs"` +} + +func (msgs Messages) SplitIntoShares() NamespacedShares { + shares := make([]NamespacedShare, 0) + msgs.sortMessages() + for _, m := range msgs.MessagesList { + rawData, err := m.MarshalDelimited() + if err != nil { + panic(fmt.Sprintf("app accepted a Message that can not be encoded %#v", m)) + } + shares = AppendToShares(shares, 
m.NamespaceID, rawData) + } + return shares +} + +func (msgs *Messages) sortMessages() { + sort.Slice(msgs.MessagesList, func(i, j int) bool { + return bytes.Compare(msgs.MessagesList[i].NamespaceID, msgs.MessagesList[j].NamespaceID) < 0 + }) +} + +type Message struct { + // NamespaceID defines the namespace of this message, i.e. the + // namespace it will use in the namespaced Merkle tree. + // + // TODO: spec out constrains and + // introduce dedicated type instead of just []byte + NamespaceID namespace.ID + + // Data is the actual data contained in the message + // (e.g. a block of a virtual sidechain). + Data []byte +} + +var ( + MessageEmpty = Message{} + MessagesEmpty = Messages{} +) + +func MessageFromProto(p *tmproto.Message) Message { + if p == nil { + return MessageEmpty + } + return Message{ + NamespaceID: p.NamespaceId, + Data: p.Data, + } +} + +func MessagesFromProto(p *tmproto.Messages) Messages { + if p == nil { + return MessagesEmpty + } + + msgs := make([]Message, 0, len(p.MessagesList)) + + for i := 0; i < len(p.MessagesList); i++ { + msgs = append(msgs, MessageFromProto(p.MessagesList[i])) + } + return Messages{MessagesList: msgs} +} + // StringIndented returns an indented string representation of the transactions. 
func (data *Data) StringIndented(indent string) string { if data == nil { @@ -1035,9 +1228,8 @@ func (data *Data) StringIndented(indent string) string { } return fmt.Sprintf(`Data{ %s %v -%s}#%v`, - indent, strings.Join(txStrings, "\n"+indent+" "), - indent, data.hash) +}`, + indent, strings.Join(txStrings, "\n"+indent+" ")) } // ToProto converts Data to protobuf @@ -1052,6 +1244,25 @@ func (data *Data) ToProto() tmproto.Data { tp.Txs = txBzs } + pevd, err := data.Evidence.ToProto() + if err != nil { + // TODO(evan): fix + panic(err) + } + tp.Evidence = *pevd + + protoMsgs := make([]*tmproto.Message, len(data.Messages.MessagesList)) + for i, msg := range data.Messages.MessagesList { + protoMsgs[i] = &tmproto.Message{ + NamespaceId: msg.NamespaceID, + Data: msg.Data, + } + } + tp.Messages = tmproto.Messages{MessagesList: protoMsgs} + tp.OriginalSquareSize = data.OriginalSquareSize + + tp.Hash = data.hash + return *tp } @@ -1073,6 +1284,27 @@ func DataFromProto(dp *tmproto.Data) (Data, error) { data.Txs = Txs{} } + if len(dp.Messages.MessagesList) > 0 { + msgs := make([]Message, len(dp.Messages.MessagesList)) + for i, m := range dp.Messages.MessagesList { + msgs[i] = Message{NamespaceID: m.NamespaceId, Data: m.Data} + } + data.Messages = Messages{MessagesList: msgs} + } else { + data.Messages = Messages{} + } + + evdData := new(EvidenceData) + err := evdData.FromProto(&dp.Evidence) + if err != nil { + return Data{}, err + } + if evdData != nil { + data.Evidence = *evdData + } + data.OriginalSquareSize = dp.OriginalSquareSize + data.hash = dp.Hash + return *data, nil } @@ -1167,6 +1399,26 @@ func (data *EvidenceData) FromProto(eviData *tmproto.EvidenceList) error { return nil } +func (data *EvidenceData) SplitIntoShares() NamespacedShares { + rawDatas := make([][]byte, 0, len(data.Evidence)) + for _, ev := range data.Evidence { + pev, err := EvidenceToProto(ev) + if err != nil { + panic("failure to convert evidence to equivalent proto type") + } + rawData, err := 
protoio.MarshalDelimited(pev) + if err != nil { + panic(err) + } + rawDatas = append(rawDatas, rawData) + } + w := NewContiguousShareWriter(consts.EvidenceNamespaceID) + for _, evd := range rawDatas { + w.Write(evd) + } + return w.Export() +} + //-------------------------------------------------------------------------------- // BlockID diff --git a/types/block_test.go b/types/block_test.go index 1c762653b8..c1d9d9c227 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -46,7 +46,7 @@ func TestBlockAddEvidence(t *testing.T) { ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} - block := MakeBlock(h, txs, commit, evList) + block := MakeBlock(h, txs, evList, nil, commit) require.NotNil(t, block) require.Equal(t, 1, len(block.Evidence.Evidence)) require.NotNil(t, block.EvidenceHash) @@ -107,7 +107,7 @@ func TestBlockValidateBasic(t *testing.T) { tc := tc i := i t.Run(tc.testName, func(t *testing.T) { - block := MakeBlock(h, txs, commit, evList) + block := MakeBlock(h, txs, evList, nil, commit) block.ProposerAddress = valSet.GetProposer().Address tc.malleateBlock(block) err = block.ValidateBasic() @@ -119,13 +119,13 @@ func TestBlockValidateBasic(t *testing.T) { func TestBlockHash(t *testing.T) { assert.Nil(t, (*Block)(nil).Hash()) - assert.Nil(t, MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).Hash()) + assert.Nil(t, MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil, nil).Hash()) } func TestBlockMakePartSet(t *testing.T) { assert.Nil(t, (*Block)(nil).MakePartSet(2)) - partSet := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).MakePartSet(1024) + partSet := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil, nil).MakePartSet(1024) assert.NotNil(t, partSet) assert.EqualValues(t, 1, partSet.Total()) } @@ -143,7 +143,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := 
[]Evidence{ev} - partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512) + partSet := MakeBlock(h, []Tx{Tx("Hello World")}, evList, nil, commit).MakePartSet(512) assert.NotNil(t, partSet) assert.EqualValues(t, 4, partSet.Total()) } @@ -160,7 +160,7 @@ func TestBlockHashesTo(t *testing.T) { ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} - block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList) + block := MakeBlock(h, []Tx{Tx("Hello World")}, evList, nil, commit) block.ValidatorsHash = valSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, block.HashesTo([]byte("something else"))) @@ -168,7 +168,7 @@ func TestBlockHashesTo(t *testing.T) { } func TestBlockSize(t *testing.T) { - size := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).Size() + size := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil, nil).Size() if size <= 0 { t.Fatal("Size of the block is zero or negative") } @@ -179,7 +179,7 @@ func TestBlockString(t *testing.T) { assert.Equal(t, "nil-Block", (*Block)(nil).StringIndented("")) assert.Equal(t, "nil-Block", (*Block)(nil).StringShort()) - block := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil) + block := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil, nil) assert.NotEqual(t, "nil-Block", block.String()) assert.NotEqual(t, "nil-Block", block.StringIndented("")) assert.NotEqual(t, "nil-Block", block.StringShort()) @@ -213,21 +213,11 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var nilBytes []byte -// This follows RFC-6962, i.e. 
`echo -n '' | sha256sum` -var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, - 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, - 0x78, 0x52, 0xb8, 0x55} - func TestNilHeaderHashDoesntCrash(t *testing.T) { assert.Equal(t, nilBytes, []byte((*Header)(nil).Hash())) assert.Equal(t, nilBytes, []byte((new(Header)).Hash())) } -func TestNilDataHashDoesntCrash(t *testing.T) { - assert.Equal(t, emptyBytes, []byte((*Data)(nil).Hash())) - assert.Equal(t, emptyBytes, []byte(new(Data).Hash())) -} - func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) @@ -469,11 +459,11 @@ func TestBlockMaxDataBytes(t *testing.T) { }{ 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, - 2: {841, 1, 0, true, 0}, - 3: {842, 1, 0, false, 0}, - 4: {843, 1, 0, false, 1}, - 5: {954, 2, 0, false, 1}, - 6: {1053, 2, 100, false, 0}, + 2: {886, 1, 0, true, 0}, + 3: {887, 1, 0, false, 0}, + 4: {888, 1, 0, false, 1}, + 5: {999, 2, 0, false, 1}, + 6: {1098, 2, 100, false, 0}, } for i, tc := range testCases { @@ -500,9 +490,9 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {841, 1, true, 0}, - 3: {842, 1, false, 0}, - 4: {843, 1, false, 1}, + 2: {886, 1, true, 0}, + 3: {887, 1, false, 0}, + 4: {888, 1, false, 1}, } for i, tc := range testCases { @@ -646,17 +636,16 @@ func TestBlockIDValidateBasic(t *testing.T) { func TestBlockProtoBuf(t *testing.T) { h := mrand.Int63() c1 := randCommit(time.Now()) - b1 := MakeBlock(h, []Tx{Tx([]byte{1})}, &Commit{Signatures: []CommitSig{}}, []Evidence{}) + b1 := MakeBlock(h, []Tx{Tx([]byte{1})}, []Evidence{}, nil, &Commit{Signatures: []CommitSig{}}) b1.ProposerAddress = tmrand.Bytes(crypto.AddressSize) - b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, c1, []Evidence{}) - b2.ProposerAddress = tmrand.Bytes(crypto.AddressSize) evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evi := 
NewMockDuplicateVoteEvidence(h, evidenceTime, "block-test-chain") - b2.Evidence = EvidenceData{Evidence: EvidenceList{evi}} - b2.EvidenceHash = b2.Evidence.Hash() + b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, []Evidence{evi}, nil, c1) + b2.ProposerAddress = tmrand.Bytes(crypto.AddressSize) + b2.Data.Evidence.ByteSize() - b3 := MakeBlock(h, []Tx{}, c1, []Evidence{}) + b3 := MakeBlock(h, []Tx{}, []Evidence{}, nil, c1) b3.ProposerAddress = tmrand.Bytes(crypto.AddressSize) testCases := []struct { msg string @@ -691,8 +680,14 @@ func TestBlockProtoBuf(t *testing.T) { } func TestDataProtoBuf(t *testing.T) { - data := &Data{Txs: Txs{Tx([]byte{1}), Tx([]byte{2}), Tx([]byte{3})}} - data2 := &Data{Txs: Txs{}} + data := &Data{ + Txs: Txs{Tx([]byte{1}), Tx([]byte{2}), Tx([]byte{3})}, + Evidence: EvidenceData{Evidence: EvidenceList{}}, + } + data2 := &Data{ + Txs: Txs{}, + Evidence: EvidenceData{Evidence: EvidenceList{}}, + } testCases := []struct { msg string data1 *Data diff --git a/types/event_bus.go b/types/event_bus.go index dfe3a06644..5ac434d584 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" @@ -172,13 +173,20 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { // add Tendermint-reserved events events = append(events, EventTx) + var txHash []byte + if len(data.OriginalHash) == tmhash.Size { + txHash = data.OriginalHash + } else { + txHash = Tx(data.Tx).Hash() + } + tokens := strings.Split(TxHashKey, ".") events = append(events, types.Event{ Type: tokens[0], Attributes: []types.EventAttribute{ { Key: tokens[1], - Value: fmt.Sprintf("%X", Tx(data.Tx).Hash()), + Value: fmt.Sprintf("%X", txHash), }, }, }) diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 
73dc13d0c1..56a5d035a7 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -64,6 +64,58 @@ func TestEventBusPublishEventTx(t *testing.T) { } } +func TestEventBusPublishEventMalleatedTx(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + + tx := Tx("foo") + malleatedTx := Tx("foo-malleated") + + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + + // PublishEventTx adds 3 composite keys, so the query below should work + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) + txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustCompile(query)) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + msg := <-txsSub.Out() + edt := msg.Data().(EventDataTx) + assert.Equal(t, int64(1), edt.Height) + assert.Equal(t, uint32(0), edt.Index) + assert.EqualValues(t, malleatedTx, edt.Tx) + assert.Equal(t, result, edt.Result) + close(done) + }() + + err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ + Height: 1, + Index: 0, + Tx: malleatedTx, + Result: result, + OriginalHash: tx.Hash(), + }}) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a transaction after 1 sec.") + } +} + func TestEventBusPublishEventNewBlock(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() @@ -74,7 +126,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } }) - block := MakeBlock(0, []Tx{}, nil, []Evidence{}) + block := MakeBlock(0, []Tx{}, []Evidence{}, nil, nil) blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()} resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ @@ -236,7 +288,7 @@ 
func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } }) - block := MakeBlock(0, []Tx{}, nil, []Evidence{}) + block := MakeBlock(0, []Tx{}, []Evidence{}, nil, nil) resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, diff --git a/types/share_merging.go b/types/share_merging.go new file mode 100644 index 0000000000..73ac48ba2f --- /dev/null +++ b/types/share_merging.go @@ -0,0 +1,307 @@ +package types + +import ( + "bytes" + "encoding/binary" + "errors" + + "github.com/celestiaorg/rsmt2d" + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/pkg/consts" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" +) + +// DataFromSquare extracts block data from an extended data square. +func DataFromSquare(eds *rsmt2d.ExtendedDataSquare) (Data, error) { + originalWidth := eds.Width() / 2 + + // sort block data shares by namespace + var ( + sortedTxShares [][]byte + sortedEvdShares [][]byte + sortedMsgShares [][]byte + ) + + // iterate over each row index + for x := uint(0); x < originalWidth; x++ { + // iterate over each share in the original data square + row := eds.Row(x) + + for _, share := range row[:originalWidth] { + // sort the data of that share types via namespace + nid := share[:consts.NamespaceSize] + switch { + case bytes.Equal(consts.TxNamespaceID, nid): + sortedTxShares = append(sortedTxShares, share) + + case bytes.Equal(consts.EvidenceNamespaceID, nid): + sortedEvdShares = append(sortedEvdShares, share) + + case bytes.Equal(consts.TailPaddingNamespaceID, nid): + continue + + // ignore unused but reserved namespaces + case bytes.Compare(nid, consts.MaxReservedNamespace) < 1: + continue + + // every other namespaceID should be a message + default: + sortedMsgShares = append(sortedMsgShares, share) + } + } + } + + // pass the raw share data to their respective parsers + txs, err := ParseTxs(sortedTxShares) + if err != nil { + 
return Data{}, err + } + + evd, err := ParseEvd(sortedEvdShares) + if err != nil { + return Data{}, err + } + + msgs, err := ParseMsgs(sortedMsgShares) + if err != nil { + return Data{}, err + } + + return Data{ + Txs: txs, + Evidence: evd, + Messages: msgs, + }, nil +} + +// ParseTxs collects all of the transactions from the shares provided +func ParseTxs(shares [][]byte) (Txs, error) { + // parse the sharse + rawTxs, err := processContiguousShares(shares) + if err != nil { + return nil, err + } + + // convert to the Tx type + txs := make(Txs, len(rawTxs)) + for i := 0; i < len(txs); i++ { + txs[i] = Tx(rawTxs[i]) + } + + return txs, nil +} + +// ParseEvd collects all evidence from the shares provided. +func ParseEvd(shares [][]byte) (EvidenceData, error) { + // the raw data returned does not have length delimiters or namespaces and + // is ready to be unmarshaled + rawEvd, err := processContiguousShares(shares) + if err != nil { + return EvidenceData{}, err + } + + evdList := make(EvidenceList, len(rawEvd)) + + // parse into protobuf bytes + for i := 0; i < len(rawEvd); i++ { + // unmarshal the evidence + var protoEvd tmproto.Evidence + err := proto.Unmarshal(rawEvd[i], &protoEvd) + if err != nil { + return EvidenceData{}, err + } + evd, err := EvidenceFromProto(&protoEvd) + if err != nil { + return EvidenceData{}, err + } + + evdList[i] = evd + } + + return EvidenceData{Evidence: evdList}, nil +} + +// ParseMsgs collects all messages from the shares provided +func ParseMsgs(shares [][]byte) (Messages, error) { + msgList, err := parseMsgShares(shares) + if err != nil { + return Messages{}, err + } + + return Messages{ + MessagesList: msgList, + }, nil +} + +// processContiguousShares takes raw shares and extracts out transactions, +// intermediate state roots, or evidence. 
The returned [][]byte do have +// namespaces or length delimiters and are ready to be unmarshalled +func processContiguousShares(shares [][]byte) (txs [][]byte, err error) { + if len(shares) == 0 { + return nil, nil + } + + ss := newShareStack(shares) + return ss.resolve() +} + +// shareStack hold variables for peel +type shareStack struct { + shares [][]byte + txLen uint64 + txs [][]byte + cursor int +} + +func newShareStack(shares [][]byte) *shareStack { + return &shareStack{shares: shares} +} + +func (ss *shareStack) resolve() ([][]byte, error) { + if len(ss.shares) == 0 { + return nil, nil + } + err := ss.peel(ss.shares[0][consts.NamespaceSize+consts.ShareReservedBytes:], true) + return ss.txs, err +} + +// peel recursively parses each chunk of data (either a transaction, +// intermediate state root, or evidence) and adds it to the underlying slice of data. +func (ss *shareStack) peel(share []byte, delimited bool) (err error) { + if delimited { + var txLen uint64 + share, txLen, err = ParseDelimiter(share) + if err != nil { + return err + } + if txLen == 0 { + return nil + } + ss.txLen = txLen + } + // safeLen describes the point in the share where it can be safely split. If + // split beyond this point, it is possible to break apart a length + // delimiter, which will result in incorrect share merging + safeLen := len(share) - binary.MaxVarintLen64 + if safeLen < 0 { + safeLen = 0 + } + if ss.txLen <= uint64(safeLen) { + ss.txs = append(ss.txs, share[:ss.txLen]) + share = share[ss.txLen:] + return ss.peel(share, true) + } + // add the next share to the current share to continue merging if possible + if len(ss.shares) > ss.cursor+1 { + ss.cursor++ + share := append(share, ss.shares[ss.cursor][consts.NamespaceSize+consts.ShareReservedBytes:]...) 
+ return ss.peel(share, false) + } + // collect any remaining data + if ss.txLen <= uint64(len(share)) { + ss.txs = append(ss.txs, share[:ss.txLen]) + share = share[ss.txLen:] + return ss.peel(share, true) + } + return errors.New("failure to parse block data: transaction length exceeded data length") +} + +// parseMsgShares iterates through raw shares and separates the contiguous chunks +// of data. It is only used for Messages, i.e. shares with a non-reserved namespace. +func parseMsgShares(shares [][]byte) ([]Message, error) { + if len(shares) == 0 { + return nil, nil + } + + // set the first nid and current share + nid := shares[0][:consts.NamespaceSize] + currentShare := shares[0][consts.NamespaceSize:] + // find and remove the msg len delimiter + currentShare, msgLen, err := ParseDelimiter(currentShare) + if err != nil { + return nil, err + } + + var msgs []Message + for cursor := uint64(0); cursor < uint64(len(shares)); { + var msg Message + currentShare, nid, cursor, msgLen, msg, err = nextMsg( + shares, + currentShare, + nid, + cursor, + msgLen, + ) + if err != nil { + return nil, err + } + if msg.Data != nil { + msgs = append(msgs, msg) + } + } + + return msgs, nil +} + +func nextMsg( + shares [][]byte, + current, + nid []byte, + cursor, + msgLen uint64, +) ([]byte, []byte, uint64, uint64, Message, error) { + switch { + // the message uses all of the current share data and at least some of the + // next share + case msgLen > uint64(len(current)): + // add the next share to the current one and try again + cursor++ + current = append(current, shares[cursor][consts.NamespaceSize:]...) 
+ return nextMsg(shares, current, nid, cursor, msgLen) + + // the msg we're looking for is contained in the current share + case msgLen <= uint64(len(current)): + msg := Message{nid, current[:msgLen]} + cursor++ + + // call it a day if the work is done + if cursor >= uint64(len(shares)) { + return nil, nil, cursor, 0, msg, nil + } + + nextNid := shares[cursor][:consts.NamespaceSize] + next, msgLen, err := ParseDelimiter(shares[cursor][consts.NamespaceSize:]) + return next, nextNid, cursor, msgLen, msg, err + } + // this code is unreachable but the compiler doesn't know that + return nil, nil, 0, 0, Message{}, nil +} + +// ParseDelimiter finds and returns the length delimiter of the message provided +// while also removing the delimiter bytes from the input +func ParseDelimiter(input []byte) ([]byte, uint64, error) { + if len(input) == 0 { + return input, 0, nil + } + + l := binary.MaxVarintLen64 + if len(input) < binary.MaxVarintLen64 { + l = len(input) + } + + delimiter := zeroPadIfNecessary(input[:l], binary.MaxVarintLen64) + + // read the length of the message + r := bytes.NewBuffer(delimiter) + msgLen, err := binary.ReadUvarint(r) + if err != nil { + return nil, 0, err + } + + // calculate the number of bytes used by the delimiter + lenBuf := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(lenBuf, msgLen) + + // return the input without the length delimiter + return input[n:], msgLen, nil +} diff --git a/types/share_splitting.go b/types/share_splitting.go new file mode 100644 index 0000000000..f6c690e4de --- /dev/null +++ b/types/share_splitting.go @@ -0,0 +1,257 @@ +package types + +import ( + "bytes" + "fmt" + "sort" + + "github.com/celestiaorg/nmt/namespace" + "github.com/tendermint/tendermint/pkg/consts" +) + +// MessageShareWriter lazily merges messages into shares that will eventually be +// included in a data square. It also has methods to help progressively count +// how many shares the messages written take up. 
+type MessageShareWriter struct { + shares [][]NamespacedShare + count int +} + +func NewMessageShareWriter() *MessageShareWriter { + return &MessageShareWriter{} +} + +// Write adds the delimited data to the underlying contiguous shares. +func (msw *MessageShareWriter) Write(msg Message) { + rawMsg, err := msg.MarshalDelimited() + if err != nil { + panic(fmt.Sprintf("app accepted a Message that can not be encoded %#v", msg)) + } + newShares := make([]NamespacedShare, 0) + newShares = AppendToShares(newShares, msg.NamespaceID, rawMsg) + msw.shares = append(msw.shares, newShares) + msw.count += len(newShares) +} + +// Export finalizes and returns the underlying contiguous shares. +func (msw *MessageShareWriter) Export() NamespacedShares { + msw.sortMsgs() + shares := make([]NamespacedShare, msw.count) + cursor := 0 + for _, messageShares := range msw.shares { + for _, share := range messageShares { + shares[cursor] = share + cursor++ + } + } + return shares +} + +func (msw *MessageShareWriter) sortMsgs() { + sort.Slice(msw.shares, func(i, j int) bool { + return bytes.Compare(msw.shares[i][0].ID, msw.shares[j][0].ID) < 0 + }) +} + +// Count returns the current number of shares that will be made if exporting. +func (msw *MessageShareWriter) Count() int { + return msw.count +} + +// appendToShares appends raw data as shares. +// Used for messages. +func AppendToShares(shares []NamespacedShare, nid namespace.ID, rawData []byte) []NamespacedShare { + if len(rawData) <= consts.MsgShareSize { + rawShare := append(append( + make([]byte, 0, len(nid)+len(rawData)), + nid...), + rawData..., + ) + paddedShare := zeroPadIfNecessary(rawShare, consts.ShareSize) + share := NamespacedShare{paddedShare, nid} + shares = append(shares, share) + } else { // len(rawData) > MsgShareSize + shares = append(shares, splitMessage(rawData, nid)...) 
+ } + return shares +} + +// splitMessage breaks the data in a message into the minimum number of +// namespaced shares +func splitMessage(rawData []byte, nid namespace.ID) NamespacedShares { + shares := make([]NamespacedShare, 0) + firstRawShare := append(append( + make([]byte, 0, consts.ShareSize), + nid...), + rawData[:consts.MsgShareSize]..., + ) + shares = append(shares, NamespacedShare{firstRawShare, nid}) + rawData = rawData[consts.MsgShareSize:] + for len(rawData) > 0 { + shareSizeOrLen := min(consts.MsgShareSize, len(rawData)) + rawShare := append(append( + make([]byte, 0, consts.ShareSize), + nid...), + rawData[:shareSizeOrLen]..., + ) + paddedShare := zeroPadIfNecessary(rawShare, consts.ShareSize) + share := NamespacedShare{paddedShare, nid} + shares = append(shares, share) + rawData = rawData[shareSizeOrLen:] + } + return shares +} + +// ContiguousShareWriter will write raw data contiguously across a progressively +// increasing set of shares. It is used to lazily split block data such as transactions +// into shares. +type ContiguousShareWriter struct { + shares []NamespacedShare + pendingShare NamespacedShare + namespace namespace.ID +} + +// NewContiguousShareWriter returns a ContigousShareWriter using the provided +// namespace. +func NewContiguousShareWriter(ns namespace.ID) *ContiguousShareWriter { + pendingShare := NamespacedShare{ID: ns, Share: make([]byte, 0, consts.ShareSize)} + pendingShare.Share = append(pendingShare.Share, ns...) + return &ContiguousShareWriter{pendingShare: pendingShare, namespace: ns} +} + +// Write adds the delimited data to the underlying contiguous shares. 
+func (csw *ContiguousShareWriter) Write(rawData []byte) { + // if this is the first time writing to a pending share, we must add the + // reserved bytes + if len(csw.pendingShare.Share) == consts.NamespaceSize { + csw.pendingShare.Share = append(csw.pendingShare.Share, 0) + } + + txCursor := len(rawData) + for txCursor != 0 { + // find the len left in the pending share + pendingLeft := consts.ShareSize - len(csw.pendingShare.Share) + + // if we can simply add the tx to the share without creating a new + // pending share, do so and return + if len(rawData) <= pendingLeft { + csw.pendingShare.Share = append(csw.pendingShare.Share, rawData...) + break + } + + // if we can only add a portion of the transaction to the pending share, + // then we add it and add the pending share to the finalized shares. + chunk := rawData[:pendingLeft] + csw.pendingShare.Share = append(csw.pendingShare.Share, chunk...) + csw.stackPending() + + // update the cursor + rawData = rawData[pendingLeft:] + txCursor = len(rawData) + + // add the share reserved bytes to the new pending share + pendingCursor := len(rawData) + consts.NamespaceSize + consts.ShareReservedBytes + var reservedByte byte + if pendingCursor >= consts.ShareSize { + // the share reserve byte is zero when some contiguously written + // data takes up the entire share + reservedByte = byte(0) + } else { + reservedByte = byte(pendingCursor) + } + + csw.pendingShare.Share = append(csw.pendingShare.Share, reservedByte) + } + + // if the share is exactly the correct size, then append to shares + if len(csw.pendingShare.Share) == consts.ShareSize { + csw.stackPending() + } +} + +// stackPending will add the pending share to accumlated shares provided that it is long enough +func (csw *ContiguousShareWriter) stackPending() { + if len(csw.pendingShare.Share) < consts.ShareSize { + return + } + csw.shares = append(csw.shares, csw.pendingShare) + newPendingShare := make([]byte, 0, consts.ShareSize) + newPendingShare = 
append(newPendingShare, csw.namespace...) + csw.pendingShare = NamespacedShare{ + Share: newPendingShare, + ID: csw.namespace, + } +} + +// Export finalizes and returns the underlying contiguous shares. +func (csw *ContiguousShareWriter) Export() NamespacedShares { + // add the pending share to the current shares before returning + if len(csw.pendingShare.Share) > consts.NamespaceSize { + csw.pendingShare.Share = zeroPadIfNecessary(csw.pendingShare.Share, consts.ShareSize) + csw.shares = append(csw.shares, csw.pendingShare) + } + // force the last share to have a reserve byte of zero + if len(csw.shares) == 0 { + return csw.shares + } + lastShare := csw.shares[len(csw.shares)-1] + rawLastShare := lastShare.Data() + + for i := 0; i < consts.ShareReservedBytes; i++ { + // here we force the last share reserved byte to be zero to avoid any + // confusion for light clients parsing these shares, as the rest of the + // data after transaction is padding. See + // https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/data_structures.md#share + rawLastShare[consts.NamespaceSize+i] = byte(0) + } + + newLastShare := NamespacedShare{ + Share: rawLastShare, + ID: lastShare.NamespaceID(), + } + csw.shares[len(csw.shares)-1] = newLastShare + return csw.shares +} + +// Count returns the current number of shares that will be made if exporting. 
+func (csw *ContiguousShareWriter) Count() (count, availableBytes int) { + availableBytes = consts.TxShareSize - (len(csw.pendingShare.Share) - consts.NamespaceSize) + return len(csw.shares), availableBytes +} + +// tail is filler for all tail padded shares +// it is allocated once and used everywhere +var tailPaddingShare = append( + append(make([]byte, 0, consts.ShareSize), consts.TailPaddingNamespaceID...), + bytes.Repeat([]byte{0}, consts.ShareSize-consts.NamespaceSize)..., +) + +func TailPaddingShares(n int) NamespacedShares { + shares := make([]NamespacedShare, n) + for i := 0; i < n; i++ { + shares[i] = NamespacedShare{ + Share: tailPaddingShare, + ID: consts.TailPaddingNamespaceID, + } + } + return shares +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func zeroPadIfNecessary(share []byte, width int) []byte { + oldLen := len(share) + if oldLen < width { + missingBytes := width - oldLen + padByte := []byte{0} + padding := bytes.Repeat(padByte, missingBytes) + share = append(share, padding...) + return share + } + return share +} diff --git a/types/shares.go b/types/shares.go new file mode 100644 index 0000000000..ac0b517009 --- /dev/null +++ b/types/shares.go @@ -0,0 +1,54 @@ +package types + +import ( + "encoding/binary" + + "github.com/celestiaorg/nmt/namespace" +) + +// Share contains the raw share data without the corresponding namespace. +type Share []byte + +// NamespacedShare extends a Share with the corresponding namespace. +type NamespacedShare struct { + Share + ID namespace.ID +} + +func (n NamespacedShare) NamespaceID() namespace.ID { + return n.ID +} + +func (n NamespacedShare) Data() []byte { + return n.Share +} + +// NamespacedShares is just a list of NamespacedShare elements. +// It can be used to extract the raw raw shares. +type NamespacedShares []NamespacedShare + +// RawShares returns the raw shares that can be fed into the erasure coding +// library (e.g. rsmt2d). 
+func (ns NamespacedShares) RawShares() [][]byte { + res := make([][]byte, len(ns)) + for i, nsh := range ns { + res[i] = nsh.Share + } + return res +} + +func (tx Tx) MarshalDelimited() ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + length := uint64(len(tx)) + n := binary.PutUvarint(lenBuf, length) + return append(lenBuf[:n], tx...), nil +} + +// MarshalDelimited marshals the raw data (excluding the namespace) of this +// message and prefixes it with the length of that encoding. +func (msg Message) MarshalDelimited() ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + length := uint64(len(msg.Data)) + n := binary.PutUvarint(lenBuf, length) + return append(lenBuf[:n], msg.Data...), nil +} diff --git a/types/shares_test.go b/types/shares_test.go new file mode 100644 index 0000000000..eebd8477d3 --- /dev/null +++ b/types/shares_test.go @@ -0,0 +1,574 @@ +package types + +import ( + "bytes" + "context" + "fmt" + "math" + "math/rand" + "reflect" + "sort" + "testing" + "time" + + "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/rsmt2d" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/libs/protoio" + "github.com/tendermint/tendermint/pkg/consts" +) + +type Splitter interface { + SplitIntoShares() NamespacedShares +} + +func TestMakeShares(t *testing.T) { + reservedTxNamespaceID := append(bytes.Repeat([]byte{0}, 7), 1) + reservedEvidenceNamespaceID := append(bytes.Repeat([]byte{0}, 7), 3) + val := NewMockPV() + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) + vote1 := makeVote(t, val, "chainID", 0, 10, 2, 1, blockID, defaultVoteTime) + vote2 := makeVote(t, val, "chainID", 0, 10, 2, 1, blockID2, defaultVoteTime) + testEvidence := &DuplicateVoteEvidence{ + VoteA: vote1, + VoteB: vote2, + } + protoTestEvidence, err := EvidenceToProto(testEvidence) + if 
err != nil { + t.Error(err) + } + testEvidenceBytes, err := protoio.MarshalDelimited(protoTestEvidence) + largeTx := Tx(bytes.Repeat([]byte("large Tx"), 50)) + largeTxLenDelimited, _ := largeTx.MarshalDelimited() + smolTx := Tx("small Tx") + smolTxLenDelimited, _ := smolTx.MarshalDelimited() + msg1 := Message{ + NamespaceID: namespace.ID("8bytesss"), + Data: []byte("some data"), + } + msg1Marshaled, _ := msg1.MarshalDelimited() + if err != nil { + t.Fatalf("Could not encode evidence: %v, error: %v\n", testEvidence, err) + } + + type args struct { + data Splitter + } + tests := []struct { + name string + args args + want NamespacedShares + }{ + { + name: "evidence", + args: args{ + data: &EvidenceData{ + Evidence: []Evidence{testEvidence}, + }, + }, + want: NamespacedShares{ + NamespacedShare{ + Share: append( + append(reservedEvidenceNamespaceID, byte(0)), + testEvidenceBytes[:consts.TxShareSize]..., + ), + ID: reservedEvidenceNamespaceID, + }, + NamespacedShare{ + Share: append( + append(reservedEvidenceNamespaceID, byte(0)), + zeroPadIfNecessary(testEvidenceBytes[consts.TxShareSize:], consts.TxShareSize)..., + ), + ID: reservedEvidenceNamespaceID, + }, + }, + }, + {"small LL Tx", + args{ + data: Txs{smolTx}, + }, + NamespacedShares{ + NamespacedShare{ + Share: append( + append(reservedTxNamespaceID, byte(0)), + zeroPadIfNecessary(smolTxLenDelimited, consts.TxShareSize)..., + ), + ID: reservedTxNamespaceID, + }, + }, + }, + {"one large LL Tx", + args{ + data: Txs{largeTx}, + }, + NamespacedShares{ + NamespacedShare{ + Share: append( + append(reservedTxNamespaceID, byte(0)), + largeTxLenDelimited[:consts.TxShareSize]..., + ), + ID: reservedTxNamespaceID, + }, + NamespacedShare{ + Share: append( + append(reservedTxNamespaceID, byte(0)), + zeroPadIfNecessary(largeTxLenDelimited[consts.TxShareSize:], consts.TxShareSize)..., + ), + ID: reservedTxNamespaceID, + }, + }, + }, + {"large then small LL Tx", + args{ + data: Txs{largeTx, smolTx}, + }, + NamespacedShares{ + 
NamespacedShare{ + Share: append( + append(reservedTxNamespaceID, byte(0)), + largeTxLenDelimited[:consts.TxShareSize]..., + ), + ID: reservedTxNamespaceID, + }, + NamespacedShare{ + Share: append( + append( + reservedTxNamespaceID, + byte(0), + ), + zeroPadIfNecessary( + append(largeTxLenDelimited[consts.TxShareSize:], smolTxLenDelimited...), + consts.TxShareSize, + )..., + ), + ID: reservedTxNamespaceID, + }, + }, + }, + {"ll-app message", + args{ + data: Messages{[]Message{msg1}}, + }, + NamespacedShares{ + NamespacedShare{ + Share: append( + []byte(msg1.NamespaceID), + zeroPadIfNecessary(msg1Marshaled, consts.MsgShareSize)..., + ), + ID: msg1.NamespaceID, + }, + }, + }, + } + for i, tt := range tests { + tt := tt // stupid scopelint :-/ + i := i + t.Run(tt.name, func(t *testing.T) { + got := tt.args.data.SplitIntoShares() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%v: makeShares() = \n%+v\nwant\n%+v\n", i, got, tt.want) + } + }) + } +} + +func Test_zeroPadIfNecessary(t *testing.T) { + type args struct { + share []byte + width int + } + tests := []struct { + name string + args args + want []byte + }{ + {"pad", args{[]byte{1, 2, 3}, 6}, []byte{1, 2, 3, 0, 0, 0}}, + {"not necessary (equal to shareSize)", args{[]byte{1, 2, 3}, 3}, []byte{1, 2, 3}}, + {"not necessary (greater shareSize)", args{[]byte{1, 2, 3}, 2}, []byte{1, 2, 3}}, + } + for _, tt := range tests { + tt := tt // stupid scopelint :-/ + t.Run(tt.name, func(t *testing.T) { + if got := zeroPadIfNecessary(tt.args.share, tt.args.width); !reflect.DeepEqual(got, tt.want) { + t.Errorf("zeroPadIfNecessary() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_appendToSharesOverwrite(t *testing.T) { + var shares NamespacedShares + + // generate some arbitrary namespaced shares first share that must be split + newShare := generateRandomNamespacedShares(1, consts.MsgShareSize+1)[0] + + // make a copy of the portion of the share to check if it's overwritten later + extraCopy := make([]byte, 
consts.MsgShareSize) + copy(extraCopy, newShare.Share[:consts.MsgShareSize]) + + // use appendToShares to add our new share + AppendToShares(shares, newShare.ID, newShare.Share) + + // check if the original share data has been overwritten. + assert.Equal(t, extraCopy, []byte(newShare.Share[:consts.MsgShareSize])) +} + +func TestDataFromSquare(t *testing.T) { + type test struct { + name string + txCount int + evdCount int + msgCount int + maxSize int // max size of each tx or msg + } + + tests := []test{ + {"one of each random small size", 1, 1, 1, 40}, + {"one of each random large size", 1, 1, 1, 400}, + {"many of each random large size", 10, 10, 10, 40}, + {"many of each random large size", 10, 10, 10, 400}, + {"only transactions", 10, 0, 0, 400}, + {"only evidence", 0, 10, 0, 400}, + {"only messages", 0, 0, 10, 400}, + } + + for _, tc := range tests { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + // generate random data + data := generateRandomBlockData( + tc.txCount, + tc.evdCount, + tc.msgCount, + tc.maxSize, + ) + + shares, _, err := data.ComputeShares(0) + require.NoError(t, err) + rawShares := shares.RawShares() + + eds, err := rsmt2d.ComputeExtendedDataSquare(rawShares, consts.DefaultCodec(), rsmt2d.NewDefaultTree) + if err != nil { + t.Error(err) + } + + res, err := DataFromSquare(eds) + if err != nil { + t.Fatal(err) + } + + // we have to compare the evidence by string because the the + // timestamps differ not by actual time represented, but by + // internals see https://github.com/stretchr/testify/issues/666 + for i := 0; i < len(data.Evidence.Evidence); i++ { + inputEvidence := data.Evidence.Evidence[i].(*DuplicateVoteEvidence) + resultEvidence := res.Evidence.Evidence[i].(*DuplicateVoteEvidence) + assert.Equal(t, inputEvidence.String(), resultEvidence.String()) + } + + // compare the original to the result w/o the evidence + data.Evidence = EvidenceData{} + res.Evidence = EvidenceData{} + + res.OriginalSquareSize = data.OriginalSquareSize + + 
assert.Equal(t, data, res) + }) + } +} + +func TestFuzz_DataFromSquare(t *testing.T) { + t.Skip() + // run random shares through processContiguousShares for a minute + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + for { + select { + case <-ctx.Done(): + return + default: + TestDataFromSquare(t) + } + } +} + +func Test_processContiguousShares(t *testing.T) { + // exactTxShareSize is the length of tx that will fit exactly into a single + // share, accounting for namespace id and the length delimiter prepended to + // each tx + const exactTxShareSize = consts.TxShareSize - 1 + + type test struct { + name string + txSize int + txCount int + } + + // each test is ran twice, once using txSize as an exact size, and again + // using it as a cap for randomly sized txs + tests := []test{ + {"single small tx", 10, 1}, + {"many small txs", 10, 10}, + {"single big tx", 1000, 1}, + {"many big txs", 1000, 10}, + {"single exact size tx", exactTxShareSize, 1}, + {"many exact size txs", exactTxShareSize, 10}, + } + + for _, tc := range tests { + tc := tc + + // run the tests with identically sized txs + t.Run(fmt.Sprintf("%s idendically sized ", tc.name), func(t *testing.T) { + txs := generateRandomContiguousShares(tc.txCount, tc.txSize) + + shares := txs.SplitIntoShares() + + parsedTxs, err := processContiguousShares(shares.RawShares()) + if err != nil { + t.Error(err) + } + + // check that the data parsed is identical + for i := 0; i < len(txs); i++ { + assert.Equal(t, []byte(txs[i]), parsedTxs[i]) + } + }) + + // run the same tests using randomly sized txs with caps of tc.txSize + t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) { + txs := generateRandomlySizedContiguousShares(tc.txCount, tc.txSize) + + shares := txs.SplitIntoShares() + + parsedTxs, err := processContiguousShares(shares.RawShares()) + if err != nil { + t.Error(err) + } + + // check that the data parsed is identical to the original + for i := 0; i < 
len(txs); i++ { + assert.Equal(t, []byte(txs[i]), parsedTxs[i]) + } + }) + } +} + +func TestFuzz_processContiguousShares(t *testing.T) { + t.Skip() + // run random shares through processContiguousShares for a minute + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + for { + select { + case <-ctx.Done(): + return + default: + Test_processContiguousShares(t) + } + } +} + +func Test_parseMsgShares(t *testing.T) { + // exactMsgShareSize is the length of message that will fit exactly into a single + // share, accounting for namespace id and the length delimiter prepended to + // each message + const exactMsgShareSize = consts.MsgShareSize - 2 + + type test struct { + name string + msgSize int + msgCount int + } + + // each test is ran twice, once using msgSize as an exact size, and again + // using it as a cap for randomly sized leaves + tests := []test{ + {"single small msg", 1, 1}, + {"many small msgs", 4, 10}, + {"single big msg", 1000, 1}, + {"many big msgs", 1000, 10}, + {"single exact size msg", exactMsgShareSize, 1}, + {"many exact size msgs", exactMsgShareSize, 10}, + } + + for _, tc := range tests { + tc := tc + + // run the tests with identically sized messagses + t.Run(fmt.Sprintf("%s idendically sized ", tc.name), func(t *testing.T) { + rawmsgs := make([]Message, tc.msgCount) + for i := 0; i < tc.msgCount; i++ { + rawmsgs[i] = generateRandomMessage(tc.msgSize) + } + msgs := Messages{MessagesList: rawmsgs} + + shares := msgs.SplitIntoShares() + + parsedMsgs, err := parseMsgShares(shares.RawShares()) + if err != nil { + t.Error(err) + } + + // check that the namesapces and data are the same + for i := 0; i < len(msgs.MessagesList); i++ { + assert.Equal(t, msgs.MessagesList[i].NamespaceID, parsedMsgs[i].NamespaceID) + assert.Equal(t, msgs.MessagesList[i].Data, parsedMsgs[i].Data) + } + }) + + // run the same tests using randomly sized messages with caps of tc.msgSize + t.Run(fmt.Sprintf("%s randomly sized", tc.name), 
func(t *testing.T) { + msgs := generateRandomlySizedMessages(tc.msgCount, tc.msgSize) + shares := msgs.SplitIntoShares() + + parsedMsgs, err := parseMsgShares(shares.RawShares()) + if err != nil { + t.Error(err) + } + + // check that the namespaces and data are the same + for i := 0; i < len(msgs.MessagesList); i++ { + assert.Equal(t, msgs.MessagesList[i].NamespaceID, parsedMsgs[i].NamespaceID) + assert.Equal(t, msgs.MessagesList[i].Data, parsedMsgs[i].Data) + } + }) + } +} + +func TestContigShareWriter(t *testing.T) { + // note that this test is mainly for debugging purposes, the main round trip + // tests occur in TestDataFromSquare and Test_processContiguousShares + w := NewContiguousShareWriter(consts.TxNamespaceID) + txs := generateRandomContiguousShares(33, 200) + for _, tx := range txs { + rawTx, _ := tx.MarshalDelimited() + w.Write(rawTx) + } + resShares := w.Export() + rawResTxs, err := processContiguousShares(resShares.RawShares()) + resTxs := ToTxs(rawResTxs) + require.NoError(t, err) + + assert.Equal(t, txs, resTxs) +} + +func Test_parseDelimiter(t *testing.T) { + for i := uint64(0); i < 100; i++ { + tx := generateRandomContiguousShares(1, int(i))[0] + input, err := tx.MarshalDelimited() + if err != nil { + panic(err) + } + res, txLen, err := ParseDelimiter(input) + if err != nil { + panic(err) + } + assert.Equal(t, i, txLen) + assert.Equal(t, []byte(tx), res) + } +} + +// generateRandomBlockData returns randomly generated block data for testing purposes +func generateRandomBlockData(txCount, evdCount, msgCount, maxSize int) Data { + var out Data + out.Txs = generateRandomlySizedContiguousShares(txCount, maxSize) + out.Evidence = generateIdenticalEvidence(evdCount) + out.Messages = generateRandomlySizedMessages(msgCount, maxSize) + return out +} + +func generateRandomlySizedContiguousShares(count, max int) Txs { + txs := make(Txs, count) + for i := 0; i < count; i++ { + size := rand.Intn(max) + if size == 0 { + size = 1 + } + txs[i] = 
generateRandomContiguousShares(1, size)[0] + } + return txs +} + +func generateRandomContiguousShares(count, size int) Txs { + txs := make(Txs, count) + for i := 0; i < count; i++ { + tx := make([]byte, size) + _, err := rand.Read(tx) + if err != nil { + panic(err) + } + txs[i] = tx + } + return txs +} + +func generateIdenticalEvidence(count int) EvidenceData { + evidence := make([]Evidence, count) + for i := 0; i < count; i++ { + ev := NewMockDuplicateVoteEvidence(math.MaxInt64, time.Now(), "chainID") + evidence[i] = ev + } + return EvidenceData{Evidence: evidence} +} + +func generateRandomlySizedMessages(count, maxMsgSize int) Messages { + msgs := make([]Message, count) + for i := 0; i < count; i++ { + msgs[i] = generateRandomMessage(rand.Intn(maxMsgSize)) + } + + // this is just to let us use assert.Equal + if count == 0 { + msgs = nil + } + + messages := Messages{MessagesList: msgs} + messages.sortMessages() + return messages +} + +func generateRandomMessage(size int) Message { + share := generateRandomNamespacedShares(1, size)[0] + msg := Message{ + NamespaceID: share.NamespaceID(), + Data: share.Data(), + } + return msg +} + +func generateRandomNamespacedShares(count, msgSize int) NamespacedShares { + shares := generateRandNamespacedRawData(uint32(count), consts.NamespaceSize, uint32(msgSize)) + msgs := make([]Message, count) + for i, s := range shares { + msgs[i] = Message{ + Data: s[consts.NamespaceSize:], + NamespaceID: s[:consts.NamespaceSize], + } + } + return Messages{MessagesList: msgs}.SplitIntoShares() +} + +func generateRandNamespacedRawData(total, nidSize, leafSize uint32) [][]byte { + data := make([][]byte, total) + for i := uint32(0); i < total; i++ { + nid := make([]byte, nidSize) + rand.Read(nid) + data[i] = nid + } + sortByteArrays(data) + for i := uint32(0); i < total; i++ { + d := make([]byte, leafSize) + rand.Read(d) + data[i] = append(data[i], d...) 
+ } + + return data +} + +func sortByteArrays(src [][]byte) { + sort.Slice(src, func(i, j int) bool { return bytes.Compare(src[i], src[j]) < 0 }) +} diff --git a/types/tx.go b/types/tx.go index 19ee41dace..d2cce07bb4 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,9 +6,12 @@ import ( "errors" "fmt" + "github.com/celestiaorg/nmt" + "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/consts" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -61,83 +64,108 @@ func (txs Txs) IndexByHash(hash []byte) int { return -1 } -// Proof returns a simple merkle proof for this node. -// Panics if i < 0 or i >= len(txs) -// TODO: optimize this! -func (txs Txs) Proof(i int) TxProof { - l := len(txs) - bzs := make([][]byte, l) - for i := 0; i < l; i++ { - bzs[i] = txs[i].Hash() - } - root, proofs := merkle.ProofsFromByteSlices(bzs) - - return TxProof{ - RootHash: root, - Data: txs[i], - Proof: *proofs[i], - } -} - // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { - RootHash tmbytes.HexBytes `json:"root_hash"` - Data Tx `json:"data"` - Proof merkle.Proof `json:"proof"` -} - -// Leaf returns the hash(tx), which is the leaf in the merkle tree which this proof refers to. -func (tp TxProof) Leaf() []byte { - return tp.Data.Hash() + RowRoots []tmbytes.HexBytes `json:"root_hash"` + Data [][]byte `json:"data"` + Proofs []*tmproto.NMTProof `json:"proof"` } // Validate verifies the proof. It returns nil if the RootHash matches the dataHash argument, // and if the proof is internally consistent. Otherwise, it returns a sensible error. 
-func (tp TxProof) Validate(dataHash []byte) error { - if !bytes.Equal(dataHash, tp.RootHash) { - return errors.New("proof matches different data hash") - } - if tp.Proof.Index < 0 { - return errors.New("proof index cannot be negative") - } - if tp.Proof.Total <= 0 { - return errors.New("proof total must be positive") +func (tp TxProof) Validate() error { + if len(tp.RowRoots) != len(tp.Proofs) || len(tp.Data) != len(tp.Proofs) { + return errors.New( + "invalid number of proofs, row roots, or data. they all must be the same to verify the proof", + ) } - valid := tp.Proof.Verify(tp.RootHash, tp.Leaf()) - if valid != nil { - return errors.New("proof is not internally consistent") + for _, proof := range tp.Proofs { + if proof.Start < 0 { + return errors.New("proof index cannot be negative") + } + if (proof.End - proof.Start) <= 0 { + return errors.New("proof total must be positive") + } + valid := tp.VerifyProof() + if !valid { + return errors.New("proof is not internally consistent") + } } + return nil } -func (tp TxProof) ToProto() tmproto.TxProof { +func (tp *TxProof) VerifyProof() bool { + for i, proof := range tp.Proofs { + nmtProof := nmt.NewInclusionProof( + int(proof.Start), + int(proof.End), + proof.Nodes, + true, + ) + valid := nmtProof.VerifyInclusion( + consts.NewBaseHashFunc(), + consts.TxNamespaceID, + tp.Data[i], + tp.RowRoots[i], + ) + if !valid { + return false + } + } + return true +} - pbProof := tp.Proof.ToProto() +func (tp *TxProof) IncludesTx(tx Tx) bool { + return bytes.Contains(bytes.Join(tp.Data, []byte{}), tx) +} +func (tp TxProof) ToProto() tmproto.TxProof { + rowRoots := make([][]byte, len(tp.RowRoots)) + for i, root := range tp.RowRoots { + rowRoots[i] = root.Bytes() + } pbtp := tmproto.TxProof{ - RootHash: tp.RootHash, + RowRoots: rowRoots, Data: tp.Data, - Proof: pbProof, + Proofs: tp.Proofs, } return pbtp } -func TxProofFromProto(pb tmproto.TxProof) (TxProof, error) { - pbProof, err := merkle.ProofFromProto(pb.Proof) - if err != nil { 
- return TxProof{}, err +func TxProofFromProto(pb tmproto.TxProof) (TxProof, error) { + rowRoots := make([]tmbytes.HexBytes, len(pb.RowRoots)) + for i, root := range pb.RowRoots { + rowRoots[i] = tmbytes.HexBytes(root) } - pbtp := TxProof{ - RootHash: pb.RootHash, + RowRoots: rowRoots, Data: pb.Data, - Proof: *pbProof, + Proofs: pb.Proofs, } return pbtp, nil } +func (txs Txs) SplitIntoShares() NamespacedShares { + rawDatas := make([][]byte, len(txs)) + for i, tx := range txs { + rawData, err := tx.MarshalDelimited() + if err != nil { + panic(fmt.Sprintf("included Tx in mem-pool that can not be encoded %v", tx)) + } + rawDatas[i] = rawData + } + + w := NewContiguousShareWriter(consts.TxNamespaceID) + for _, tx := range rawDatas { + w.Write(tx) + } + + return w.Export() +} + // ComputeProtoSizeForTxs wraps the transactions in tmproto.Data{} and calculates the size. // https://developers.google.com/protocol-buffers/docs/encoding func ComputeProtoSizeForTxs(txs []Tx) int64 { @@ -145,3 +173,58 @@ func ComputeProtoSizeForTxs(txs []Tx) int64 { pdData := data.ToProto() return int64(pdData.Size()) } + +// ToTxs converts a raw slice of byte slices into a Txs type. +func ToTxs(txs [][]byte) Txs { + txBzs := make(Txs, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] + } + return txBzs +} + +// ToSliceOfBytes converts a Txs to slice of byte slices. +func (txs Txs) ToSliceOfBytes() [][]byte { + txBzs := make([][]byte, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] + } + return txBzs +} + +// UnwrapMalleatedTx attempts to unmarshal the provided transaction into a malleated +// transaction wrapper, if this can be done, then it returns true. A malleated +// transaction is a normal transaction that has been derived (malleated) from a +// different original transaction. The returned hash is that of the original +// transaction, which allows us to remove the original transaction from the +// mempool. 
NOTE: protobuf sometimes does not throw an error if the transaction +passed is not a tmproto.MalleatedTx. Since the schema for PayForMessage is kept +in the app, we cannot perform further checks without creating an import +cycle. +func UnwrapMalleatedTx(tx Tx) (originalHash []byte, unwrapped Tx, isMalleated bool) { + // attempt to unmarshal into a malleated transaction + var malleatedTx tmproto.MalleatedTx + err := proto.Unmarshal(tx, &malleatedTx) + if err != nil { + return nil, nil, false + } + // this check will fail to catch unwanted types should those unmarshalled + // types happen to have a hash sized slice of bytes in the same field number + // as originalTxHash. TODO(evan): either fix this, or better yet use a different + // mechanism + if len(malleatedTx.OriginalTxHash) != tmhash.Size { + return nil, nil, false + } + return malleatedTx.OriginalTxHash, malleatedTx.Tx, true +} + +// WrapMalleatedTx creates a wrapped Tx that includes the original transaction's hash +// so that it can be easily removed from the mempool. 
note: must be unwrapped to +// be a viable sdk.Tx +func WrapMalleatedTx(originalHash []byte, malleated Tx) (Tx, error) { + wTx := tmproto.MalleatedTx{ + OriginalTxHash: originalHash, + Tx: malleated, + } + return proto.Marshal(&wTx) +} diff --git a/types/tx_test.go b/types/tx_test.go index 8fe277da82..d77ba00e8f 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -1,16 +1,11 @@ package types import ( - "bytes" - mrand "math/rand" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - ctest "github.com/tendermint/tendermint/internal/libs/test" tmrand "github.com/tendermint/tendermint/libs/rand" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func makeTxs(cnt, size int) Txs { @@ -21,11 +16,6 @@ func makeTxs(cnt, size int) Txs { return txs } -func randInt(low, high int) int { - off := mrand.Int() % (high - low) - return low + off -} - func TestTxIndex(t *testing.T) { for i := 0; i < 20; i++ { txs := makeTxs(15, 60) @@ -51,102 +41,3 @@ func TestTxIndexByHash(t *testing.T) { assert.Equal(t, -1, txs.IndexByHash(Tx("foodnwkf").Hash())) } } - -func TestValidTxProof(t *testing.T) { - cases := []struct { - txs Txs - }{ - {Txs{{1, 4, 34, 87, 163, 1}}}, - {Txs{{5, 56, 165, 2}, {4, 77}}}, - {Txs{Tx("foo"), Tx("bar"), Tx("baz")}}, - {makeTxs(20, 5)}, - {makeTxs(7, 81)}, - {makeTxs(61, 15)}, - } - - for h, tc := range cases { - txs := tc.txs - root := txs.Hash() - // make sure valid proof for every tx - for i := range txs { - tx := []byte(txs[i]) - proof := txs.Proof(i) - assert.EqualValues(t, i, proof.Proof.Index, "%d: %d", h, i) - assert.EqualValues(t, len(txs), proof.Proof.Total, "%d: %d", h, i) - assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) - assert.EqualValues(t, tx, proof.Data, "%d: %d", h, i) - assert.EqualValues(t, txs[i].Hash(), proof.Leaf(), "%d: %d", h, i) - assert.Nil(t, proof.Validate(root), "%d: %d", h, i) - assert.NotNil(t, proof.Validate([]byte("foobar")), "%d: %d", h, i) - - // read-write 
must also work - var ( - p2 TxProof - pb2 tmproto.TxProof - ) - pbProof := proof.ToProto() - bin, err := pbProof.Marshal() - require.NoError(t, err) - - err = pb2.Unmarshal(bin) - require.NoError(t, err) - - p2, err = TxProofFromProto(pb2) - if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { - assert.Nil(t, p2.Validate(root), "%d: %d", h, i) - } - } - } -} - -func TestTxProofUnchangable(t *testing.T) { - // run the other test a bunch... - for i := 0; i < 40; i++ { - testTxProofUnchangable(t) - } -} - -func testTxProofUnchangable(t *testing.T) { - // make some proof - txs := makeTxs(randInt(2, 100), randInt(16, 128)) - root := txs.Hash() - i := randInt(0, len(txs)-1) - proof := txs.Proof(i) - - // make sure it is valid to start with - assert.Nil(t, proof.Validate(root)) - pbProof := proof.ToProto() - bin, err := pbProof.Marshal() - require.NoError(t, err) - - // try mutating the data and make sure nothing breaks - for j := 0; j < 500; j++ { - bad := ctest.MutateByteSlice(bin) - if !bytes.Equal(bad, bin) { - assertBadProof(t, root, bad, proof) - } - } -} - -// This makes sure that the proof doesn't deserialize into something valid. -func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { - - var ( - proof TxProof - pbProof tmproto.TxProof - ) - err := pbProof.Unmarshal(bad) - if err == nil { - proof, err = TxProofFromProto(pbProof) - if err == nil { - err = proof.Validate(root) - if err == nil { - // XXX Fix simple merkle proofs so the following is *not* OK. - // This can happen if we have a slightly different total (where the - // path ends up the same). If it is something else, we have a real - // problem. - assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) - } - } - } -}