diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 61fe636ac..160148f78 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -6,6 +6,7 @@ on: push: branches: - main + - occ-main # TODO: remove after occ work is done permissions: contents: read diff --git a/baseapp/abci.go b/baseapp/abci.go index b70874ace..7986d1899 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -12,6 +12,8 @@ import ( "syscall" "time" + "github.com/cosmos/cosmos-sdk/tasks" + "github.com/armon/go-metrics" "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" @@ -69,11 +71,6 @@ func (app *BaseApp) InitChain(ctx context.Context, req *abci.RequestInitChain) ( return } - // add block gas meter for any genesis transactions (allow infinite gas) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) - app.prepareProposalState.ctx = app.prepareProposalState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) - app.processProposalState.ctx = app.processProposalState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) - resp := app.initChainer(app.deliverState.ctx, *req) app.initChainer(app.prepareProposalState.ctx, *req) app.initChainer(app.processProposalState.ctx, *req) @@ -205,7 +202,7 @@ func (app *BaseApp) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) (res abc // internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx // will contain releveant error information. Regardless of tx execution outcome, // the ResponseCheckTx will contain relevant gas execution context. -func (app *BaseApp) CheckTx(ctx context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { +func (app *BaseApp) CheckTx(ctx context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTxV2, error) { defer telemetry.MeasureSince(time.Now(), "abci", "check_tx") var mode runTxMode @@ -222,25 +219,60 @@ func (app *BaseApp) CheckTx(ctx context.Context, req *abci.RequestCheckTx) (*abc } sdkCtx := app.getContextForTx(mode, req.Tx) - gInfo, result, _, priority, err := app.runTx(sdkCtx, mode, req.Tx) + tx, err := app.txDecoder(req.Tx) + if err != nil { + res := sdkerrors.ResponseCheckTx(err, 0, 0, app.trace) + return &abci.ResponseCheckTxV2{ResponseCheckTx: &res}, err + } + gInfo, result, _, priority, pendingTxChecker, expireTxHandler, txCtx, err := app.runTx(sdkCtx, mode, tx, sha256.Sum256(req.Tx)) if err != nil { res := sdkerrors.ResponseCheckTx(err, gInfo.GasWanted, gInfo.GasUsed, app.trace) - return &res, err + return &abci.ResponseCheckTxV2{ResponseCheckTx: &res}, err } - return &abci.ResponseCheckTx{ - GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? - Data: result.Data, - Priority: priority, - }, nil + res := &abci.ResponseCheckTxV2{ + ResponseCheckTx: &abci.ResponseCheckTx{ + GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? 
+			Data:      result.Data,
+			Priority:  priority,
+		},
+		ExpireTxHandler:  expireTxHandler,
+		EVMNonce:         txCtx.EVMNonce(),
+		EVMSenderAddress: txCtx.EVMSenderAddress(),
+		IsEVM:            txCtx.IsEVM(),
+	}
+	if pendingTxChecker != nil {
+		res.IsPendingTransaction = true
+		res.Checker = pendingTxChecker
+	}
+
+	return res, nil
+}
+
+// DeliverTxBatch executes multiple txs concurrently through the OCC scheduler
+func (app *BaseApp) DeliverTxBatch(ctx sdk.Context, req sdk.DeliverTxBatchRequest) (res sdk.DeliverTxBatchResponse) {
+	scheduler := tasks.NewScheduler(app.concurrencyWorkers, app.TracingInfo, app.DeliverTx)
+	// if the metadata for the txs is empty, the estimate prefill below is effectively a no-op
+
+	// process all txs; this also initializes the MVS if prefill estimates were disabled
+	txRes, err := scheduler.ProcessAll(ctx, req.TxEntries)
+	if err != nil {
+		// TODO: handle error
+	}
+
+	responses := make([]*sdk.DeliverTxResult, 0, len(req.TxEntries))
+	for _, tx := range txRes {
+		responses = append(responses, &sdk.DeliverTxResult{Response: tx})
+	}
+	return sdk.DeliverTxBatchResponse{Results: responses}
+}

 // DeliverTx implements the ABCI interface and executes a tx in DeliverTx mode.
 // State only gets persisted if all messages are valid and get executed successfully.
-// Otherwise, the ResponseDeliverTx will contain releveant error information.
+// Otherwise, the ResponseDeliverTx will contain relevant error information.
 // Regardless of tx execution outcome, the ResponseDeliverTx will contain relevant
 // gas execution context.
-func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx) (res abci.ResponseDeliverTx) {
+func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res abci.ResponseDeliverTx) {
 	defer telemetry.MeasureSince(time.Now(), "abci", "deliver_tx")
 	defer func() {
 		for _, streamingListener := range app.abciListeners {
@@ -260,7 +292,7 @@ func (app *BaseApp) DeliverTx(ctx sdk.Context, req abci.RequestDeliverTx) (res a
 		telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
 	}()

-	gInfo, result, anteEvents, _, err := app.runTx(ctx.WithTxBytes(req.Tx).WithVoteInfos(app.voteInfos), runTxModeDeliver, req.Tx)
+	gInfo, result, anteEvents, _, _, _, _, err := app.runTx(ctx.WithTxBytes(req.Tx).WithVoteInfos(app.voteInfos), runTxModeDeliver, tx, checksum)
 	if err != nil {
 		resultStr = "failed"
 		// if we have a result, use those events instead of just the anteEvents
@@ -1012,16 +1044,23 @@ func (app *BaseApp) ProcessProposal(ctx context.Context, req *abci.RequestProces
 		app.setProcessProposalHeader(header)
 	}

-	// add block gas meter
-	var gasMeter sdk.GasMeter
-	if maxGas := app.getMaximumBlockGas(app.processProposalState.ctx); maxGas > 0 {
-		gasMeter = sdk.NewGasMeter(maxGas)
-	} else {
-		gasMeter = sdk.NewInfiniteGasMeter()
-	}
-
 	// NOTE: header hash is not set in NewContext, so we manually set it here
-	app.prepareProcessProposalState(gasMeter, req.Hash)
+
+	app.prepareProcessProposalState(req.Hash)
+
+	defer func() {
+		if err := recover(); err != nil {
+			app.logger.Error(
+				"panic recovered in ProcessProposal",
+				"height", req.Height,
+				"time", req.Time,
+				"hash", fmt.Sprintf("%X", req.Hash),
+				"panic", err,
+			)
+
+			resp = &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}
+		}
+	}()

 	defer func() {
 		if err := recover(); err != nil {
@@ -1094,22 +1133,14 @@ func (app *BaseApp) FinalizeBlock(ctx context.Context, req *abci.RequestFinalize
 		app.setDeliverStateHeader(header)
 	}

-	// add block gas meter
-	var gasMeter sdk.GasMeter
-	if maxGas := app.getMaximumBlockGas(app.deliverState.ctx); maxGas > 0 {
-		gasMeter = sdk.NewGasMeter(maxGas)
-	} else {
-		gasMeter = sdk.NewInfiniteGasMeter()
-	}
-
 	// NOTE: header hash is not set in NewContext, so we manually set it here
-	app.prepareDeliverState(gasMeter, req.Hash)
+	app.prepareDeliverState(req.Hash)

 	// we also set block gas meter to checkState in case the application needs to
 	// verify gas consumption during (Re)CheckTx
 	if app.checkState != nil {
-		app.checkState.SetContext(app.checkState.ctx.WithBlockGasMeter(gasMeter).WithHeaderHash(req.Hash))
+		app.checkState.SetContext(app.checkState.ctx.WithHeaderHash(req.Hash))
 	}

 	if app.finalizeBlocker != nil {
diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go
index ea9c31134..b3f11e68b 100644
--- a/baseapp/baseapp.go
+++ b/baseapp/baseapp.go
@@ -2,7 +2,6 @@ package baseapp

 import (
 	"context"
-	"crypto/sha256"
 	"fmt"
 	"reflect"
 	"strings"
@@ -14,6 +13,7 @@ import (
 	"go.opentelemetry.io/otel/trace"

 	"github.com/armon/go-metrics"
+	"github.com/cosmos/cosmos-sdk/server/config"
 	"github.com/cosmos/cosmos-sdk/utils/tracing"
 	"github.com/gogo/protobuf/proto"
 	sdbm "github.com/sei-protocol/sei-tm-db/backends"
@@ -57,7 +57,9 @@ const (
 	FlagArchivalArweaveIndexDBFullPath = "archival-arweave-index-db-full-path"
 	FlagArchivalArweaveNodeURL         = "archival-arweave-node-url"

-	FlagChainID = "chain-id"
+	FlagChainID            = "chain-id"
+	FlagConcurrencyWorkers = "concurrency-workers"
+	FlagOccEnabled         = "occ-enabled"
 )

 var (
@@ -155,14 +157,19 @@ type BaseApp struct { //nolint: maligned
 	ChainID string

-	votesInfoLock sync.RWMutex
-	commitLock    *sync.Mutex
+	votesInfoLock    sync.RWMutex
+	commitLock       *sync.Mutex
+	checkTxStateLock *sync.RWMutex

 	compactionInterval uint64

 	TmConfig *tmcfg.Config

-	TracingInfo *tracing.Info
+	TracingInfo    *tracing.Info
+	TracingEnabled bool
+
+	concurrencyWorkers int
+	occEnabled         bool
 }

 type appStore struct {
@@ -238,7 +245,8 @@ func NewBaseApp(
 	tp := trace.NewNoopTracerProvider()
 	otel.SetTracerProvider(trace.NewNoopTracerProvider())
 	tr := tp.Tracer("component-main")
-	if tracingEnabled := cast.ToBool(appOpts.Get(tracing.FlagTracing)); tracingEnabled {
+	tracingEnabled := cast.ToBool(appOpts.Get(tracing.FlagTracing))
+	if tracingEnabled {
 		tp, err := tracing.DefaultTracerProvider()
 		if err != nil {
 			panic(err)
 		}
@@ -261,12 +269,14 @@ func NewBaseApp(
 			grpcQueryRouter:  NewGRPCQueryRouter(),
 			msgServiceRouter: NewMsgServiceRouter(),
 		},
-		txDecoder: txDecoder,
-		TmConfig:  tmConfig,
+		txDecoder:      txDecoder,
+		TmConfig:       tmConfig,
+		TracingEnabled: tracingEnabled,
 		TracingInfo: &tracing.Info{
 			Tracer: &tr,
 		},
-		commitLock: &sync.Mutex{},
+		commitLock:       &sync.Mutex{},
+		checkTxStateLock: &sync.RWMutex{},
 	}
 	app.TracingInfo.SetContext(context.Background())

@@ -286,6 +296,16 @@ func NewBaseApp(
 	}
 	app.startCompactionRoutine(db)

+	// if no option has overridden it already, initialize to the flag's value
+	// this avoids forcing every implementation to pass an option, but allows it
+	if app.concurrencyWorkers == 0 {
+		app.concurrencyWorkers = cast.ToInt(appOpts.Get(FlagConcurrencyWorkers))
+	}
+	// if still 0, fall back to the config default
+	if app.concurrencyWorkers == 0 {
+		app.concurrencyWorkers = config.DefaultConcurrencyWorkers
+	}
+
 	return app
 }

@@ -299,6 +319,16 @@
 func (app *BaseApp) AppVersion() uint64 {
 	return app.appVersion
 }

+// ConcurrencyWorkers returns the number of concurrent workers for the BaseApp.
+func (app *BaseApp) ConcurrencyWorkers() int {
+	return app.concurrencyWorkers
+}
+
+// OccEnabled returns whether OCC is enabled for the BaseApp.
+func (app *BaseApp) OccEnabled() bool { + return app.occEnabled +} + // Version returns the application's version string. func (app *BaseApp) Version() string { return app.version @@ -501,6 +531,8 @@ func (app *BaseApp) IsSealed() bool { return app.sealed } func (app *BaseApp) setCheckState(header tmproto.Header) { ms := app.cms.CacheMultiStore() ctx := sdk.NewContext(ms, header, true, app.logger).WithMinGasPrices(app.minGasPrices) + app.checkTxStateLock.Lock() + defer app.checkTxStateLock.Unlock() if app.checkState == nil { app.checkState = &state{ ms: ms, @@ -587,8 +619,8 @@ func (app *BaseApp) preparePrepareProposalState() { } } -func (app *BaseApp) prepareProcessProposalState(gasMeter sdk.GasMeter, headerHash []byte) { - app.processProposalState.SetContext(app.processProposalState.Context().WithBlockGasMeter(gasMeter). +func (app *BaseApp) prepareProcessProposalState(headerHash []byte) { + app.processProposalState.SetContext(app.processProposalState.Context(). WithHeaderHash(headerHash). WithConsensusParams(app.GetConsensusParams(app.processProposalState.Context()))) @@ -597,9 +629,8 @@ func (app *BaseApp) prepareProcessProposalState(gasMeter sdk.GasMeter, headerHas } } -func (app *BaseApp) prepareDeliverState(gasMeter sdk.GasMeter, headerHash []byte) { +func (app *BaseApp) prepareDeliverState(headerHash []byte) { app.deliverState.SetContext(app.deliverState.Context(). - WithBlockGasMeter(gasMeter). WithHeaderHash(headerHash). WithConsensusParams(app.GetConsensusParams(app.deliverState.Context()))) } @@ -698,27 +729,6 @@ func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp *tmproto.ConsensusP app.paramStore.Set(ctx, ParamStoreKeyABCIParams, cp.Abci) } -// getMaximumBlockGas gets the maximum gas from the consensus params. It panics -// if maximum block gas is less than negative one and returns zero if negative -// one. -func (app *BaseApp) getMaximumBlockGas(ctx sdk.Context) uint64 { - cp := app.GetConsensusParams(ctx) - if cp == nil || cp.Block == nil { - return 0 - } - - maxGas := cp.Block.MaxGas - - // TODO::: This is a temporary fix, max gas causes non-deterministic behavior - // with parallel TX - switch { - case maxGas < -1: - panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas)) - default: - return 0 - } -} - func (app *BaseApp) validateHeight(req abci.RequestBeginBlock) error { if req.Header.Height < 1 { return fmt.Errorf("invalid height: %d", req.Header.Height) @@ -795,7 +805,7 @@ func (app *BaseApp) getContextForTx(mode runTxMode, txBytes []byte) sdk.Context // cacheTxContext returns a new context based off of the provided context with // a branched multi-store. -func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, sdk.CacheMultiStore) { +func (app *BaseApp) cacheTxContext(ctx sdk.Context, checksum [32]byte) (sdk.Context, sdk.CacheMultiStore) { ms := ctx.MultiStore() // TODO: https://github.com/cosmos/cosmos-sdk/issues/2824 msCache := ms.CacheMultiStore() @@ -803,7 +813,7 @@ func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context msCache = msCache.SetTracingContext( sdk.TraceContext( map[string]interface{}{ - "txHash": fmt.Sprintf("%X", sha256.Sum256(txBytes)), + "txHash": fmt.Sprintf("%X", checksum), }, ), ).(sdk.CacheMultiStore) @@ -819,8 +829,16 @@ func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context // Note, gas execution info is always returned. 
A reference to a Result is // returned if the tx does not run out of gas and if all the messages are valid // and execute successfully. An error is returned otherwise. -func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, priority int64, err error) { - +func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, tx sdk.Tx, checksum [32]byte) ( + gInfo sdk.GasInfo, + result *sdk.Result, + anteEvents []abci.Event, + priority int64, + pendingTxChecker abci.PendingTxChecker, + expireHandler abci.ExpireTxHandler, + txCtx sdk.Context, + err error, +) { defer telemetry.MeasureThroughputSinceWithLabels( telemetry.TxCount, []metrics.Label{ @@ -839,11 +857,13 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf // resources are acceessed by the ante handlers and message handlers. defer acltypes.SendAllSignalsForTx(ctx.TxCompletionChannels()) acltypes.WaitForAllSignalsForTx(ctx.TxBlockingChannels()) - // check for existing parent tracer, and if applicable, use it - spanCtx, span := app.TracingInfo.StartWithContext("RunTx", ctx.TraceSpanContext()) - defer span.End() - ctx = ctx.WithTraceSpanContext(spanCtx) - span.SetAttributes(attribute.String("txHash", fmt.Sprintf("%X", sha256.Sum256(txBytes)))) + if app.TracingEnabled { + // check for existing parent tracer, and if applicable, use it + spanCtx, span := app.TracingInfo.StartWithContext("RunTx", ctx.TraceSpanContext()) + defer span.End() + ctx = ctx.WithTraceSpanContext(spanCtx) + span.SetAttributes(attribute.String("txHash", fmt.Sprintf("%X", checksum))) + } // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is // determined by the GasMeter. We need access to the context to get the gas @@ -852,11 +872,6 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf ms := ctx.MultiStore() - // only run the tx if there is block gas remaining - if mode == runTxModeDeliver && ctx.BlockGasMeter().IsOutOfGas() { - return gInfo, nil, nil, -1, sdkerrors.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx") - } - defer func() { if r := recover(); r != nil { acltypes.SendAllSignalsForTx(ctx.TxCompletionChannels()) @@ -869,42 +884,23 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf gInfo = sdk.GasInfo{GasWanted: gasWanted, GasUsed: ctx.GasMeter().GasConsumed()} }() - blockGasConsumed := false - // consumeBlockGas makes sure block gas is consumed at most once. It must happen after - // tx processing, and must be execute even if tx processing fails. Hence we use trick with `defer` - consumeBlockGas := func() { - if !blockGasConsumed { - blockGasConsumed = true - ctx.BlockGasMeter().ConsumeGas( - ctx.GasMeter().GasConsumedToLimit(), "block gas meter", - ) - } - } - - // If BlockGasMeter() panics it will be caught by the above recover and will - // return an error - in any case BlockGasMeter will consume gas past the limit. - // - // NOTE: This must exist in a separate defer function for the above recovery - // to recover from this one. 
-	if mode == runTxModeDeliver {
-		defer consumeBlockGas()
-	}
-
-	tx, err := app.txDecoder(txBytes)
-	if err != nil {
-		return sdk.GasInfo{}, nil, nil, 0, err
+	if tx == nil {
+		return sdk.GasInfo{}, nil, nil, 0, nil, nil, ctx, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx decode error")
 	}

 	msgs := tx.GetMsgs()
 	if err := validateBasicTxMsgs(msgs); err != nil {
-		return sdk.GasInfo{}, nil, nil, 0, err
+		return sdk.GasInfo{}, nil, nil, 0, nil, nil, ctx, err
 	}

 	if app.anteHandler != nil {
-		// trace AnteHandler
-		_, anteSpan := app.TracingInfo.StartWithContext("AnteHandler", ctx.TraceSpanContext())
-		defer anteSpan.End()
+		var anteSpan trace.Span
+		if app.TracingEnabled {
+			// trace AnteHandler
+			_, anteSpan = app.TracingInfo.StartWithContext("AnteHandler", ctx.TraceSpanContext())
+			defer anteSpan.End()
+		}
 		var (
 			anteCtx sdk.Context
 			msCache sdk.CacheMultiStore
@@ -916,7 +912,7 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
 		// NOTE: Alternatively, we could require that AnteHandler ensures that
 		// writes do not happen if aborted/failed.  This may have some
 		// performance benefits, but it'll be more difficult to get right.
-		anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
+		anteCtx, msCache = app.cacheTxContext(ctx, checksum)
 		anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
 		newCtx, err := app.anteHandler(anteCtx, tx, mode == runTxModeSimulate)

@@ -940,7 +936,7 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
 		// GasMeter expected to be set in AnteHandler
 		gasWanted = ctx.GasMeter().Limit()

 		if err != nil {
-			return gInfo, nil, nil, 0, err
+			return gInfo, nil, nil, 0, nil, nil, ctx, err
 		}

 		// Dont need to validate in checkTx mode
@@ -948,6 +944,7 @@
 			storeAccessOpEvents := msCache.GetEvents()
 			accessOps := ctx.TxMsgAccessOps()[acltypes.ANTE_MSG_INDEX]
+			// TODO: (occ) This is an example of where we do our current validation. Note that this validation operates on the declared dependencies for a TX / antehandler + the utilized dependencies, whereas the validation for OCC will be done holistically based on the mvkv
 			missingAccessOps := ctx.MsgValidator().ValidateAccessOperations(accessOps, storeAccessOpEvents)
 			if len(missingAccessOps) != 0 {
 				for op := range missingAccessOps {
@@ -955,20 +952,24 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf
 					op.EmitValidationFailMetrics()
 				}
 				errMessage := fmt.Sprintf("Invalid Concurrent Execution antehandler missing %d access operations", len(missingAccessOps))
-				return gInfo, nil, nil, 0, sdkerrors.Wrap(sdkerrors.ErrInvalidConcurrencyExecution, errMessage)
+				return gInfo, nil, nil, 0, nil, nil, ctx, sdkerrors.Wrap(sdkerrors.ErrInvalidConcurrencyExecution, errMessage)
 			}
 		}

 		priority = ctx.Priority()
+		pendingTxChecker = ctx.PendingTxChecker()
+		expireHandler = ctx.ExpireTxHandler()
 		msCache.Write()
 		anteEvents = events.ToABCIEvents()
-		anteSpan.End()
+		if app.TracingEnabled {
+			anteSpan.End()
+		}
 	}

 	// Create a new Context based off of the existing Context with a MultiStore branch
 	// in case message processing fails. At this point, the MultiStore
 	// is a branch of a branch.
-	runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+	runMsgCtx, msCache := app.cacheTxContext(ctx, checksum)

 	// Attempt to execute all messages and only update state if all messages pass
 	// and we're in DeliverTx. 
Note, runMsgs will never return a reference to a @@ -976,9 +977,6 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf result, err = app.runMsgs(runMsgCtx, msgs, mode) if err == nil && mode == runTxModeDeliver { - // When block gas exceeds, it'll panic and won't commit the cached store. - consumeBlockGas() - msCache.Write() } // we do this since we will only be looking at result in DeliverTx @@ -986,7 +984,10 @@ func (app *BaseApp) runTx(ctx sdk.Context, mode runTxMode, txBytes []byte) (gInf // append the events in the order of occurrence result.Events = append(anteEvents, result.Events...) } - return gInfo, result, anteEvents, priority, err + if ctx.CheckTxCallback() != nil { + ctx.CheckTxCallback()(err) + } + return gInfo, result, anteEvents, priority, pendingTxChecker, expireHandler, ctx, err } // runMsgs iterates through a list of messages and executes them with the provided @@ -1010,9 +1011,11 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, mode runTxMode) (*s panic(err) } }() - spanCtx, span := app.TracingInfo.StartWithContext("RunMsgs", ctx.TraceSpanContext()) - defer span.End() - ctx = ctx.WithTraceSpanContext(spanCtx) + if app.TracingEnabled { + spanCtx, span := app.TracingInfo.StartWithContext("RunMsgs", ctx.TraceSpanContext()) + defer span.End() + ctx = ctx.WithTraceSpanContext(spanCtx) + } msgLogs := make(sdk.ABCIMessageLogs, 0, len(msgs)) events := sdk.EmptyEvents() txMsgData := &sdk.TxMsgData{ @@ -1031,7 +1034,7 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, mode runTxMode) (*s err error ) - msgCtx, msgMsCache := app.cacheTxContext(ctx, []byte{}) + msgCtx, msgMsCache := app.cacheTxContext(ctx, [32]byte{}) msgCtx = msgCtx.WithMessageIndex(i) startTime := time.Now() @@ -1092,6 +1095,8 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, mode runTxMode) (*s storeAccessOpEvents := msgMsCache.GetEvents() accessOps := ctx.TxMsgAccessOps()[i] missingAccessOps := ctx.MsgValidator().ValidateAccessOperations(accessOps, storeAccessOpEvents) + // TODO: (occ) This is where we are currently validating our per message dependencies, + // whereas validation will be done holistically based on the mvkv for OCC approach if len(missingAccessOps) != 0 { for op := range missingAccessOps { ctx.Logger().Info((fmt.Sprintf("eventMsgName=%s Missing Access Operation:%s ", eventMsgName, op.String()))) @@ -1172,5 +1177,7 @@ func (app *BaseApp) ReloadDB() error { } func (app *BaseApp) GetCheckCtx() sdk.Context { + app.checkTxStateLock.RLock() + defer app.checkTxStateLock.RUnlock() return app.checkState.ctx } diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 6fcebace5..20cfc326f 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -140,6 +140,11 @@ func TestSetMinGasPrices(t *testing.T) { require.Equal(t, minGasPrices, app.minGasPrices) } +func TestSetOccEnabled(t *testing.T) { + app := newBaseApp(t.Name(), SetOccEnabled(true)) + require.True(t, app.OccEnabled()) +} + // func TestGetMaximumBlockGas(t *testing.T) { // app := setupBaseApp(t) // app.InitChain(context.Background(), &abci.RequestInitChain{}) diff --git a/baseapp/block_gas_test.go b/baseapp/block_gas_test.go deleted file mode 100644 index 2d154709c..000000000 --- a/baseapp/block_gas_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package baseapp_test - -import ( - "context" - "encoding/json" - "fmt" - "math" - "testing" - - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - 
"github.com/tendermint/tendermint/libs/log" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" - "github.com/cosmos/cosmos-sdk/simapp" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - txtypes "github.com/cosmos/cosmos-sdk/types/tx" - "github.com/cosmos/cosmos-sdk/types/tx/signing" - xauthsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" -) - -func TestBaseApp_BlockGas(t *testing.T) { - testcases := []struct { - name string - gasToConsume uint64 // gas to consume in the msg execution - panicTx bool // panic explicitly in tx execution - expErr bool - }{ - {"less than block gas meter", 10, false, false}, - // {"more than block gas meter", blockMaxGas, false, true}, - // {"more than block gas meter", uint64(float64(blockMaxGas) * 1.2), false, true}, - // {"consume MaxUint64", math.MaxUint64, false, true}, - {"consume MaxGasWanted", txtypes.MaxGasWanted, false, true}, - {"consume block gas when paniced", 10, true, true}, - } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - var app *simapp.SimApp - routerOpt := func(bapp *baseapp.BaseApp) { - route := (&testdata.TestMsg{}).Route() - bapp.Router().AddRoute(sdk.NewRoute(route, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - _, ok := msg.(*testdata.TestMsg) - if !ok { - return &sdk.Result{}, fmt.Errorf("Wrong Msg type, expected %T, got %T", (*testdata.TestMsg)(nil), msg) - } - ctx.KVStore(app.GetKey(banktypes.ModuleName)).Set([]byte("ok"), []byte("ok")) - ctx.GasMeter().ConsumeGas(tc.gasToConsume, "TestMsg") - if tc.panicTx { - panic("panic in tx execution") - } - return &sdk.Result{}, nil - })) - } - encCfg := simapp.MakeTestEncodingConfig() - encCfg.Amino.RegisterConcrete(&testdata.TestMsg{}, "testdata.TestMsg", nil) - encCfg.InterfaceRegistry.RegisterImplementations((*sdk.Msg)(nil), - &testdata.TestMsg{}, - ) - app = simapp.NewSimApp(log.NewNopLogger(), dbm.NewMemDB(), nil, true, map[int64]bool{}, "", 0, nil, encCfg, &simapp.EmptyAppOptions{}, routerOpt) - genState := simapp.NewDefaultGenesisState(encCfg.Marshaler) - stateBytes, err := json.MarshalIndent(genState, "", " ") - require.NoError(t, err) - app.InitChain(context.Background(), &abci.RequestInitChain{ - Validators: []abci.ValidatorUpdate{}, - ConsensusParams: simapp.DefaultConsensusParams, - AppStateBytes: stateBytes, - }) - - ctx := app.NewContext(false, tmproto.Header{}) - - // tx fee - feeCoin := sdk.NewCoin("atom", sdk.NewInt(150)) - feeAmount := sdk.NewCoins(feeCoin) - - // test account and fund - priv1, _, addr1 := testdata.KeyTestPubAddr() - err = app.BankKeeper.MintCoins(ctx, minttypes.ModuleName, feeAmount) - require.NoError(t, err) - err = app.BankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, addr1, feeAmount) - require.NoError(t, err) - require.Equal(t, feeCoin.Amount, app.BankKeeper.GetBalance(ctx, addr1, feeCoin.Denom).Amount) - seq, _ := app.AccountKeeper.GetSequence(ctx, addr1) - require.Equal(t, uint64(0), seq) - - // msg and signatures - msg := testdata.NewTestMsg(addr1) - - txBuilder := encCfg.TxConfig.NewTxBuilder() - require.NoError(t, txBuilder.SetMsgs(msg)) - 
txBuilder.SetFeeAmount(feeAmount) - txBuilder.SetGasLimit(txtypes.MaxGasWanted) // tx validation checks that gasLimit can't be bigger than this - - privs, accNums, accSeqs := []cryptotypes.PrivKey{priv1}, []uint64{6}, []uint64{0} - _, txBytes, err := createTestTx(encCfg.TxConfig, txBuilder, privs, accNums, accSeqs, ctx.ChainID()) - require.NoError(t, err) - - rsp, _ := app.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{ - Height: 1, - Txs: [][]byte{txBytes}, - }) - - // check result - ctx = app.GetContextForDeliverTx(txBytes) - okValue := ctx.KVStore(app.GetKey(banktypes.ModuleName)).Get([]byte("ok")) - - if tc.expErr { - if tc.panicTx { - require.Equal(t, sdkerrors.ErrPanic.ABCICode(), rsp.TxResults[0].Code) - } else { - require.Equal(t, sdkerrors.ErrOutOfGas.ABCICode(), rsp.TxResults[0].Code) - } - require.Empty(t, okValue) - } else { - require.Equal(t, uint32(0), rsp.TxResults[0].Code) - require.Equal(t, []byte("ok"), okValue) - } - // check block gas is always consumed - this value may change if we update the logic for - // how gas is consumed - baseGas := uint64(62766) // baseGas is the gas consumed before tx msg - expGasConsumed := addUint64Saturating(tc.gasToConsume, baseGas) - if expGasConsumed > txtypes.MaxGasWanted { - // capped by gasLimit - expGasConsumed = txtypes.MaxGasWanted - } - require.Equal(t, int(expGasConsumed), int(ctx.BlockGasMeter().GasConsumed())) - // tx fee is always deducted - require.Equal(t, 0, int(app.BankKeeper.GetBalance(ctx, addr1, feeCoin.Denom).Amount.Int64())) - // sender's sequence is always increased - seq, err = app.AccountKeeper.GetSequence(ctx, addr1) - require.NoError(t, err) - require.Equal(t, uint64(1), seq) - }) - } -} - -func createTestTx(txConfig client.TxConfig, txBuilder client.TxBuilder, privs []cryptotypes.PrivKey, accNums []uint64, accSeqs []uint64, chainID string) (xauthsigning.Tx, []byte, error) { - // First round: we gather all the signer infos. We use the "set empty - // signature" hack to do that. - var sigsV2 []signing.SignatureV2 - for i, priv := range privs { - sigV2 := signing.SignatureV2{ - PubKey: priv.PubKey(), - Data: &signing.SingleSignatureData{ - SignMode: txConfig.SignModeHandler().DefaultMode(), - Signature: nil, - }, - Sequence: accSeqs[i], - } - - sigsV2 = append(sigsV2, sigV2) - } - err := txBuilder.SetSignatures(sigsV2...) - if err != nil { - return nil, nil, err - } - - // Second round: all signer infos are set, so each signer can sign. - sigsV2 = []signing.SignatureV2{} - for i, priv := range privs { - signerData := xauthsigning.SignerData{ - ChainID: chainID, - AccountNumber: accNums[i], - Sequence: accSeqs[i], - } - sigV2, err := tx.SignWithPrivKey( - txConfig.SignModeHandler().DefaultMode(), signerData, - txBuilder, priv, txConfig, accSeqs[i]) - if err != nil { - return nil, nil, err - } - - sigsV2 = append(sigsV2, sigV2) - } - err = txBuilder.SetSignatures(sigsV2...) 
-	if err != nil {
-		return nil, nil, err
-	}
-
-	txBytes, err := txConfig.TxEncoder()(txBuilder.GetTx())
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return txBuilder.GetTx(), txBytes, nil
-}
-
-func addUint64Saturating(a, b uint64) uint64 {
-	if math.MaxUint64-a < b {
-		return math.MaxUint64
-	}
-
-	return a + b
-}
diff --git a/baseapp/deliver_tx_batch_test.go b/baseapp/deliver_tx_batch_test.go
new file mode 100644
index 000000000..3cf6e0739
--- /dev/null
+++ b/baseapp/deliver_tx_batch_test.go
@@ -0,0 +1,146 @@
+package baseapp
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+func anteHandler(capKey sdk.StoreKey, storeKey []byte) sdk.AnteHandler {
+	return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) {
+		store := ctx.KVStore(capKey)
+		txTest := tx.(txTest)
+
+		if txTest.FailOnAnte {
+			return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure")
+		}
+
+		val := getIntFromStore(store, storeKey)
+		setIntOnStore(store, storeKey, val+1)
+
+		ctx.EventManager().EmitEvents(
+			counterEvent("ante-val", val+1),
+		)
+
+		return ctx, nil
+	}
+}
+
+func handlerKVStore(capKey sdk.StoreKey) sdk.Handler {
+	return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+		ctx = ctx.WithEventManager(sdk.NewEventManager())
+		res := &sdk.Result{}
+
+		// Use the tx index from the context as the unique ID for this transaction
+		txIndex := ctx.TxIndex()
+
+		// Use the unique ID to get a specific key for this transaction
+		sharedKey := []byte("shared")
+		txKey := []byte(fmt.Sprintf("tx-%d", txIndex))
+
+		// Get the store, then increment both counters and emit events with the
+		// new values, mirroring the ante handler above
+		store := ctx.KVStore(capKey)
+
+		// increment per-tx key (no conflict)
+		val := getIntFromStore(store, txKey)
+		setIntOnStore(store, txKey, val+1)
+
+		// increment shared key
+		sharedVal := getIntFromStore(store, sharedKey)
+		setIntOnStore(store, sharedKey, sharedVal+1)
+
+		// Emit an event with the incremented value and the unique ID
+		ctx.EventManager().EmitEvent(
+			sdk.NewEvent(sdk.EventTypeMessage,
+				sdk.NewAttribute("shared-val", fmt.Sprintf("%d", sharedVal+1)),
+				sdk.NewAttribute("tx-val", fmt.Sprintf("%d", val+1)),
+				sdk.NewAttribute("tx-id", fmt.Sprintf("%d", txIndex)),
+			),
+		)
+
+		res.Events = ctx.EventManager().Events().ToABCIEvents()
+		return res, nil
+	}
+}
+
+func requireAttribute(t *testing.T, evts []abci.Event, name string, val string) {
+	for _, evt := range evts {
+		for _, att := range evt.Attributes {
+			if string(att.Key) == name {
+				require.Equal(t, val, string(att.Value))
+				return
+			}
+		}
+	}
+	require.Fail(t, fmt.Sprintf("attribute %s not found with value %s", name, val))
+}
+
+func TestDeliverTxBatch(t *testing.T) {
+	// test increments in the ante
+	anteKey := []byte("ante-key")
+
+	anteOpt := func(bapp *BaseApp) {
+		bapp.SetAnteHandler(anteHandler(capKey1, anteKey))
+	}
+
+	// test increments in the handler
+	routerOpt := func(bapp *BaseApp) {
+		r := sdk.NewRoute(routeMsgCounter, handlerKVStore(capKey1))
+		bapp.Router().AddRoute(r)
+	}
+
+	app := setupBaseApp(t, anteOpt, routerOpt)
+	app.InitChain(context.Background(), &abci.RequestInitChain{})
+
+	// Create same codec used in txDecoder
+	codec := 
codec.NewLegacyAmino() + registerTestCodec(codec) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := 0; blockN < nBlocks; blockN++ { + header := tmproto.Header{Height: int64(blockN) + 1} + app.setDeliverState(header) + app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) + + var requests []*sdk.DeliverTxEntry + for i := 0; i < txPerHeight; i++ { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(counter, counter) + + txBytes, err := codec.Marshal(tx) + require.NoError(t, err) + requests = append(requests, &sdk.DeliverTxEntry{ + Request: abci.RequestDeliverTx{Tx: txBytes}, + SdkTx: *tx, + AbsoluteIndex: i, + }) + } + + responses := app.DeliverTxBatch(app.deliverState.ctx, sdk.DeliverTxBatchRequest{TxEntries: requests}) + require.Len(t, responses.Results, txPerHeight) + + for idx, deliverTxRes := range responses.Results { + res := deliverTxRes.Response + require.Equal(t, abci.CodeTypeOK, res.Code) + requireAttribute(t, res.Events, "tx-id", fmt.Sprintf("%d", idx)) + requireAttribute(t, res.Events, "tx-val", fmt.Sprintf("%d", blockN+1)) + requireAttribute(t, res.Events, "shared-val", fmt.Sprintf("%d", blockN*txPerHeight+idx+1)) + } + + app.EndBlock(app.deliverState.ctx, abci.RequestEndBlock{}) + require.Empty(t, app.deliverState.ctx.MultiStore().GetEvents()) + app.SetDeliverStateToCommit() + app.Commit(context.Background()) + } +} diff --git a/baseapp/deliver_tx_test.go b/baseapp/deliver_tx_test.go index c9fdc767d..11e451c36 100644 --- a/baseapp/deliver_tx_test.go +++ b/baseapp/deliver_tx_test.go @@ -3,6 +3,7 @@ package baseapp import ( "bytes" "context" + "crypto/sha256" "encoding/binary" "fmt" "math/rand" @@ -219,7 +220,6 @@ func TestWithRouter(t *testing.T) { for blockN := 0; blockN < nBlocks; blockN++ { header := tmproto.Header{Height: int64(blockN) + 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) for i := 0; i < txPerHeight; i++ { @@ -229,7 +229,8 @@ func TestWithRouter(t *testing.T) { txBytes, err := codec.Marshal(tx) require.NoError(t, err) - res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}) + decoded, _ := app.txDecoder(txBytes) + res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes)) require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) } @@ -267,7 +268,6 @@ func TestBaseApp_EndBlock(t *testing.T) { app.Seal() app.setDeliverState(tmproto.Header{}) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) res := app.EndBlock(app.deliverState.ctx, abci.RequestEndBlock{}) require.Empty(t, app.deliverState.ctx.MultiStore().GetEvents()) @@ -323,7 +323,6 @@ func TestQuery(t *testing.T) { // query is still empty after a DeliverTx before we commit header := tmproto.Header{Height: app.LastBlockHeight() + 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) _, resTx, err = app.Deliver(aminoTxEncoder(), tx) @@ -352,7 +351,6 @@ func TestGRPCQuery(t *testing.T) { app.InitChain(context.Background(), &abci.RequestInitChain{}) header := tmproto.Header{Height: app.LastBlockHeight() + 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, 
abci.RequestBeginBlock{Header: header}) app.SetDeliverStateToCommit() app.Commit(context.Background()) @@ -434,12 +432,12 @@ func TestMultiMsgDeliverTx(t *testing.T) { header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) tx := newTxCounter(0, 0, 1, 2) txBytes, err := codec.Marshal(tx) require.NoError(t, err) - res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}) + decoded, _ := app.txDecoder(txBytes) + res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes)) require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) store := app.deliverState.ctx.KVStore(capKey1) @@ -459,7 +457,8 @@ func TestMultiMsgDeliverTx(t *testing.T) { tx.Msgs = append(tx.Msgs, msgCounter2{1}) txBytes, err = codec.Marshal(tx) require.NoError(t, err) - res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}) + decoded, _ = app.txDecoder(txBytes) + res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes)) require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) store = app.deliverState.ctx.KVStore(capKey1) @@ -517,7 +516,6 @@ func TestSimulateTx(t *testing.T) { count := int64(blockN + 1) header := tmproto.Header{Height: count} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) tx := newTxCounter(count, count) @@ -577,7 +575,6 @@ func TestRunInvalidTransaction(t *testing.T) { header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) // transaction with no messages @@ -658,9 +655,8 @@ func TestRunInvalidTransaction(t *testing.T) { txBytes, err := newCdc.Marshal(tx) require.NoError(t, err) - res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), res.Code) - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) + _, err = app.txDecoder(txBytes) + require.NotNil(t, err) } } @@ -706,7 +702,6 @@ func TestTxGasLimits(t *testing.T) { header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) testCases := []struct { @@ -877,7 +872,6 @@ func TestCustomRunTxPanicHandler(t *testing.T) { header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { @@ -921,7 +915,6 @@ func TestBaseAppAnteHandler(t *testing.T) { header := tmproto.Header{Height: app.LastBlockHeight() + 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) // execute a tx that will fail ante handler execution @@ -932,7 +925,8 @@ func TestBaseAppAnteHandler(t *testing.T) { tx.setFailOnAnte(true) 
txBytes, err := cdc.Marshal(tx)
 	require.NoError(t, err)
-	res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes})
+	decoded, _ := app.txDecoder(txBytes)
+	res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes))

 	require.Empty(t, res.Events)
 	require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
@@ -948,7 +942,8 @@ func TestBaseAppAnteHandler(t *testing.T) {
 	txBytes, err = cdc.Marshal(tx)
 	require.NoError(t, err)

-	res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes})
+	decoded, _ = app.txDecoder(txBytes)
+	res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes))

 	// should emit ante event
 	require.NotEmpty(t, res.Events)
 	require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
@@ -965,7 +960,8 @@ func TestBaseAppAnteHandler(t *testing.T) {
 	txBytes, err = cdc.Marshal(tx)
 	require.NoError(t, err)

-	res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes})
+	decoded, _ = app.txDecoder(txBytes)
+	res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes))

 	require.NotEmpty(t, res.Events)
 	require.True(t, res.IsOK(), fmt.Sprintf("%v", res))
@@ -1035,7 +1031,6 @@ func TestGasConsumptionBadTx(t *testing.T) {
 	header := tmproto.Header{Height: app.LastBlockHeight() + 1}
 	app.setDeliverState(header)
-	app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewGasMeter(app.getMaximumBlockGas(app.deliverState.ctx)))
 	app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header})

 	tx := newTxCounter(5, 0)
@@ -1043,16 +1038,10 @@ func TestGasConsumptionBadTx(t *testing.T) {
 	txBytes, err := cdc.Marshal(tx)
 	require.NoError(t, err)

-	res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes})
+	res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, tx, sha256.Sum256(txBytes))
 	require.False(t, res.IsOK(), fmt.Sprintf("%v", res))

-	// require next tx to fail due to black gas limit
-	tx = newTxCounter(5, 0)
-	txBytes, err = cdc.Marshal(tx)
-	require.NoError(t, err)
-
-	res = app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes})
-	require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
+	// the block-gas-exceeded case was removed along with the block gas meter; gasWanted < max block gas is still enforced by other checks
 }

 func TestInitChainer(t *testing.T) {
@@ -1132,7 +1121,6 @@ func TestInitChainer(t *testing.T) {
 	// commit and ensure we can still query
 	header := tmproto.Header{Height: app.LastBlockHeight() + 1}
 	app.setDeliverState(header)
-	app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter())
 	app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header})
 	app.SetDeliverStateToCommit()
 	app.Commit(context.Background())
@@ -1172,7 +1160,6 @@ func TestBeginBlock_WithInitialHeight(t *testing.T) {
 	)

 	app.setDeliverState(tmproto.Header{Height: 4})
-	app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter())
 	require.PanicsWithError(t, "invalid height: 4; expected: 3", func() {
 		app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{
 			Header: tmproto.Header{
@@ -1465,11 +1452,9 @@ func TestCheckTx(t *testing.T) {
 	// If a block is committed, CheckTx state should be reset. 
header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.checkState.ctx = app.checkState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()).WithHeaderHash([]byte("hash")) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) + app.checkState.ctx = app.checkState.ctx.WithHeaderHash([]byte("hash")) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header, Hash: []byte("hash")}) - require.NotNil(t, app.checkState.ctx.BlockGasMeter(), "block gas meter should have been set to checkState") require.NotEmpty(t, app.checkState.ctx.HeaderHash()) app.EndBlock(app.deliverState.ctx, abci.RequestEndBlock{}) @@ -1510,7 +1495,6 @@ func TestDeliverTx(t *testing.T) { for blockN := 0; blockN < nBlocks; blockN++ { header := tmproto.Header{Height: int64(blockN) + 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) for i := 0; i < txPerHeight; i++ { @@ -1520,7 +1504,8 @@ func TestDeliverTx(t *testing.T) { txBytes, err := codec.Marshal(tx) require.NoError(t, err) - res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}) + decoded, _ := app.txDecoder(txBytes) + res := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, decoded, sha256.Sum256(txBytes)) require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) events := res.GetEvents() require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") @@ -1663,7 +1648,6 @@ func TestLoadVersionInvalid(t *testing.T) { header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) app.SetDeliverStateToCommit() app.Commit(context.Background()) @@ -1715,7 +1699,6 @@ func setupBaseAppWithSnapshots(t *testing.T, blocks uint, blockTxs int, options keyCounter := 0 for height := int64(1); height <= int64(blocks); height++ { app.setDeliverState(tmproto.Header{Height: height}) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: tmproto.Header{Height: height}}) for txNum := 0; txNum < blockTxs; txNum++ { tx := txTest{Msgs: []sdk.Msg{}} @@ -1724,12 +1707,12 @@ func setupBaseAppWithSnapshots(t *testing.T, blocks uint, blockTxs int, options value := make([]byte, 10000) _, err := r.Read(value) require.NoError(t, err) - tx.Msgs = append(tx.Msgs, msgKeyValue{Key: key, Value: value}) + tx.Msgs = append(tx.Msgs, &msgKeyValue{Key: key, Value: value}) keyCounter++ } txBytes, err := codec.Marshal(tx) require.NoError(t, err) - resp := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}) + resp := app.DeliverTx(app.deliverState.ctx, abci.RequestDeliverTx{Tx: txBytes}, tx, sha256.Sum256(txBytes)) require.True(t, resp.IsOK(), "%v", resp.String()) } app.EndBlock(app.deliverState.ctx, abci.RequestEndBlock{Height: height}) @@ -1793,7 +1776,6 @@ func TestLoadVersion(t *testing.T) { // execute a block, collect commit ID header := tmproto.Header{Height: 1} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) app.SetDeliverStateToCommit() app.Commit(context.Background()) @@ -1801,7 +1783,6 
@@ func TestLoadVersion(t *testing.T) { // execute a block, collect commit ID header = tmproto.Header{Height: 2} app.setDeliverState(header) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: header}) app.SetDeliverStateToCommit() app.Commit(context.Background()) @@ -1885,7 +1866,6 @@ func TestSetLoader(t *testing.T) { // "execute" one block app.setDeliverState(tmproto.Header{Height: 2}) - app.deliverState.ctx = app.deliverState.ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) app.BeginBlock(app.deliverState.ctx, abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) app.SetDeliverStateToCommit() app.Commit(context.Background()) diff --git a/baseapp/msg_service_router_test.go b/baseapp/msg_service_router_test.go index 6c504aba0..0cd6bf385 100644 --- a/baseapp/msg_service_router_test.go +++ b/baseapp/msg_service_router_test.go @@ -2,6 +2,7 @@ package baseapp_test import ( "context" + "crypto/sha256" "testing" "github.com/stretchr/testify/require" @@ -73,7 +74,8 @@ func TestMsgService(t *testing.T) { encCfg := simapp.MakeTestEncodingConfig() testdata.RegisterInterfaces(encCfg.InterfaceRegistry) db := dbm.NewMemDB() - app := baseapp.NewBaseApp("test", log.NewTestingLogger(t), db, encCfg.TxConfig.TxDecoder(), nil, &testutil.TestAppOpts{}) + decoder := encCfg.TxConfig.TxDecoder() + app := baseapp.NewBaseApp("test", log.NewTestingLogger(t), db, decoder, nil, &testutil.TestAppOpts{}) app.SetInterfaceRegistry(encCfg.InterfaceRegistry) testdata.RegisterMsgServer( app.MsgServiceRouter(), @@ -81,10 +83,15 @@ func TestMsgService(t *testing.T) { ) app.SetFinalizeBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { txResults := []*abci.ExecTxResult{} - for _, tx := range req.Txs { + for _, txbz := range req.Txs { + tx, err := decoder(txbz) + if err != nil { + txResults = append(txResults, &abci.ExecTxResult{}) + continue + } deliverTxResp := app.DeliverTx(ctx, abci.RequestDeliverTx{ - Tx: tx, - }) + Tx: txbz, + }, tx, sha256.Sum256(txbz)) txResults = append(txResults, &abci.ExecTxResult{ Code: deliverTxResp.Code, Data: deliverTxResp.Data, diff --git a/baseapp/options.go b/baseapp/options.go index 75ccf93db..1ef2622b3 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -82,6 +82,14 @@ func SetSnapshotInterval(interval uint64) func(*BaseApp) { return func(app *BaseApp) { app.SetSnapshotInterval(interval) } } +func SetConcurrencyWorkers(workers int) func(*BaseApp) { + return func(app *BaseApp) { app.SetConcurrencyWorkers(workers) } +} + +func SetOccEnabled(occEnabled bool) func(*BaseApp) { + return func(app *BaseApp) { app.SetOccEnabled(occEnabled) } +} + // SetSnapshotKeepRecent sets the recent snapshots to keep. func SetSnapshotKeepRecent(keepRecent uint32) func(*BaseApp) { return func(app *BaseApp) { app.SetSnapshotKeepRecent(keepRecent) } @@ -290,6 +298,20 @@ func (app *BaseApp) SetSnapshotInterval(snapshotInterval uint64) { app.snapshotInterval = snapshotInterval } +func (app *BaseApp) SetConcurrencyWorkers(workers int) { + if app.sealed { + panic("SetConcurrencyWorkers() on sealed BaseApp") + } + app.concurrencyWorkers = workers +} + +func (app *BaseApp) SetOccEnabled(occEnabled bool) { + if app.sealed { + panic("SetOccEnabled() on sealed BaseApp") + } + app.occEnabled = occEnabled +} + // SetSnapshotKeepRecent sets the number of recent snapshots to keep. 
func (app *BaseApp) SetSnapshotKeepRecent(snapshotKeepRecent uint32) { if app.sealed { diff --git a/baseapp/test_helpers.go b/baseapp/test_helpers.go index d8b114395..dc8b5150f 100644 --- a/baseapp/test_helpers.go +++ b/baseapp/test_helpers.go @@ -1,6 +1,8 @@ package baseapp import ( + "crypto/sha256" + sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -15,7 +17,7 @@ func (app *BaseApp) Check(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk return sdk.GasInfo{}, nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) } ctx := app.checkState.ctx.WithTxBytes(bz).WithVoteInfos(app.voteInfos).WithConsensusParams(app.GetConsensusParams(app.checkState.ctx)) - gasInfo, result, _, _, err := app.runTx(ctx, runTxModeCheck, bz) + gasInfo, result, _, _, _, _, _, err := app.runTx(ctx, runTxModeCheck, tx, sha256.Sum256(bz)) if len(ctx.MultiStore().GetEvents()) > 0 { panic("Expected checkTx events to be empty") } @@ -25,7 +27,11 @@ func (app *BaseApp) Check(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk func (app *BaseApp) Simulate(txBytes []byte) (sdk.GasInfo, *sdk.Result, error) { ctx := app.checkState.ctx.WithTxBytes(txBytes).WithVoteInfos(app.voteInfos).WithConsensusParams(app.GetConsensusParams(app.checkState.ctx)) ctx, _ = ctx.CacheContext() - gasInfo, result, _, _, err := app.runTx(ctx, runTxModeSimulate, txBytes) + tx, err := app.txDecoder(txBytes) + if err != nil { + return sdk.GasInfo{}, nil, err + } + gasInfo, result, _, _, _, _, _, err := app.runTx(ctx, runTxModeSimulate, tx, sha256.Sum256(txBytes)) if len(ctx.MultiStore().GetEvents()) > 0 { panic("Expected simulate events to be empty") } @@ -39,7 +45,11 @@ func (app *BaseApp) Deliver(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *s return sdk.GasInfo{}, nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) } ctx := app.deliverState.ctx.WithTxBytes(bz).WithVoteInfos(app.voteInfos).WithConsensusParams(app.GetConsensusParams(app.deliverState.ctx)) - gasInfo, result, _, _, err := app.runTx(ctx, runTxModeDeliver, bz) + decoded, err := app.txDecoder(bz) + if err != nil { + return sdk.GasInfo{}, &sdk.Result{}, err + } + gasInfo, result, _, _, _, _, _, err := app.runTx(ctx, runTxModeDeliver, decoded, sha256.Sum256(bz)) return gasInfo, result, err } diff --git a/go.mod b/go.mod index 4d21e52e3..4f93606ed 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/regen-network/cosmos-proto v0.3.1 github.com/rs/zerolog v1.30.0 github.com/savaki/jq v0.0.0-20161209013833-0e6baecebbf8 - github.com/sei-protocol/sei-db v0.0.22 + github.com/sei-protocol/sei-db v0.0.27-0.20240123064153-d6dfa112e760 github.com/sei-protocol/sei-tm-db v0.0.5 github.com/spf13/cast v1.5.0 github.com/spf13/cobra v1.6.1 @@ -180,10 +180,10 @@ replace ( // TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.7.0 github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/sei-protocol/sei-db => github.com/sei-protocol/sei-db v0.0.25 + github.com/sei-protocol/sei-db => github.com/sei-protocol/sei-db v0.0.28 // Latest goleveldb is broken, we have to stick to this version github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tendermint/tendermint => github.com/sei-protocol/sei-tendermint v0.2.35 + github.com/tendermint/tendermint => github.com/sei-protocol/sei-tendermint 
v0.2.36-evm-rebase-5 // latest grpc doesn't work with with our modified proto compiler, so we need to enforce // the following version across all dependencies. google.golang.org/grpc => google.golang.org/grpc v1.33.2 diff --git a/go.sum b/go.sum index 5ec11222e..1d3677009 100644 --- a/go.sum +++ b/go.sum @@ -781,12 +781,12 @@ github.com/savaki/jq v0.0.0-20161209013833-0e6baecebbf8/go.mod h1:Nw/CCOXNyF5JDd github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/sei-protocol/sei-db v0.0.25 h1:jC1ivcaNxSR7EmxqvxexqPpnN/G0vUTZNHZI+C9T8M8= -github.com/sei-protocol/sei-db v0.0.25/go.mod h1:F/ZKZA8HJPcUzSZPA8yt6pfwlGriJ4RDR4eHKSGLStI= +github.com/sei-protocol/sei-db v0.0.28 h1:RE4k2aXSERUixJC2kZri201w1fG3WJ7PZfvCJHCYkiM= +github.com/sei-protocol/sei-db v0.0.28/go.mod h1:F/ZKZA8HJPcUzSZPA8yt6pfwlGriJ4RDR4eHKSGLStI= github.com/sei-protocol/sei-iavl v0.1.9 h1:y4mVYftxLNRs6533zl7N0/Ch+CzRQc04JDfHolIxgBE= github.com/sei-protocol/sei-iavl v0.1.9/go.mod h1:7PfkEVT5dcoQE+s/9KWdoXJ8VVVP1QpYYPLdxlkSXFk= -github.com/sei-protocol/sei-tendermint v0.2.35 h1:TvmcsNLsv1l8gJRVwsFRNa/YsySp7jtPku/JfyOBO4w= -github.com/sei-protocol/sei-tendermint v0.2.35/go.mod h1:4LSlJdhl3nf3OmohliwRNUFLOB1XWlrmSodrIP7fLh4= +github.com/sei-protocol/sei-tendermint v0.2.36-evm-rebase-5 h1:+exV5/mhDzqI00cdWoSUluocoyYzZszjKSUBymVCBSI= +github.com/sei-protocol/sei-tendermint v0.2.36-evm-rebase-5/go.mod h1:4LSlJdhl3nf3OmohliwRNUFLOB1XWlrmSodrIP7fLh4= github.com/sei-protocol/sei-tm-db v0.0.5 h1:3WONKdSXEqdZZeLuWYfK5hP37TJpfaUa13vAyAlvaQY= github.com/sei-protocol/sei-tm-db v0.0.5/go.mod h1:Cpa6rGyczgthq7/0pI31jys2Fw0Nfrc+/jKdP1prVqY= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= diff --git a/proto/cosmos/accesscontrol/constants.proto b/proto/cosmos/accesscontrol/constants.proto index a8820fb48..7df3e7d06 100644 --- a/proto/cosmos/accesscontrol/constants.proto +++ b/proto/cosmos/accesscontrol/constants.proto @@ -130,7 +130,26 @@ enum ResourceType { KV_DEX_SHORT_ORDER_COUNT = 92; // child of KV_DEX KV_BANK_DEFERRED = 93; // child of KV + reserved 94; KV_BANK_DEFERRED_MODULE_TX_INDEX = 95; // child of KV_BANK_DEFERRED + + KV_EVM = 96; // child of KV + KV_EVM_BALANCE = 97; // child of KV_EVM; deprecated + KV_EVM_TRANSIENT = 98; // child of KV_EVM + KV_EVM_ACCOUNT_TRANSIENT = 99; // child of KV_EVM + KV_EVM_MODULE_TRANSIENT = 100; // child of KV_EVM + KV_EVM_NONCE = 101; // child of KV_EVM + KV_EVM_RECEIPT = 102; // child of KV_EVM + KV_EVM_S2E = 103; // child of KV_EVM + KV_EVM_E2S = 104; // child of KV_EVM + KV_EVM_CODE_HASH = 105; // child of KV_EVM + KV_EVM_CODE = 106; // child of KV_EVM + KV_EVM_CODE_SIZE = 107; // child of KV_EVM + + KV_BANK_WEI_BALANCE = 108; // child of KV_BANK + + KV_DEX_MEM_CONTRACTS_TO_PROCESS = 109; // child of KV_DEX_MEM + KV_DEX_MEM_DOWNSTREAM_CONTRACTS = 110; // child of KV_DEX_MEM } enum WasmMessageSubtype { diff --git a/server/config/config.go b/server/config/config.go index 5d2c7c00d..e7d6c3c6d 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -21,6 +21,12 @@ const ( // DefaultGRPCWebAddress defines the default address to bind the gRPC-web server to. 
DefaultGRPCWebAddress = "0.0.0.0:9091"
+
+ // DefaultConcurrencyWorkers defines the default workers to use for concurrent transactions
+ DefaultConcurrencyWorkers = 20
+
+ // DefaultOccEnabled defines whether to use OCC for tx processing
+ DefaultOccEnabled = true
)
// BaseConfig defines the server's basic configuration
@@ -88,6 +94,12 @@ type BaseConfig struct {
SeparateOrphanVersionsToKeep int64 `mapstructure:"separate-orphan-versions-to-keep"`
NumOrphanPerFile int `mapstructure:"num-orphan-per-file"`
OrphanDirectory string `mapstructure:"orphan-dir"`
+
+ // ConcurrencyWorkers defines the number of workers to use for concurrent
+ // transaction execution. A value of -1 means unlimited workers. Default value is 20.
+ ConcurrencyWorkers int `mapstructure:"concurrency-workers"`
+ // Whether to enable optimistic concurrency control for tx execution, default is true
+ OccEnabled bool `mapstructure:"occ-enabled"`
}
// APIConfig defines the API listener configuration.
@@ -238,6 +250,8 @@ func DefaultConfig() *Config {
IAVLDisableFastNode: true,
CompactionInterval: 0,
NoVersioning: false,
+ ConcurrencyWorkers: DefaultConcurrencyWorkers,
+ OccEnabled: DefaultOccEnabled,
},
Telemetry: telemetry.Config{
Enabled: false,
@@ -314,6 +328,8 @@ func GetConfig(v *viper.Viper) (Config, error) {
SeparateOrphanVersionsToKeep: v.GetInt64("separate-orphan-versions-to-keep"),
NumOrphanPerFile: v.GetInt("num-orphan-per-file"),
OrphanDirectory: v.GetString("orphan-dir"),
+ ConcurrencyWorkers: v.GetInt("concurrency-workers"),
+ OccEnabled: v.GetBool("occ-enabled"),
},
Telemetry: telemetry.Config{
ServiceName: v.GetString("telemetry.service-name"),
diff --git a/server/config/config_test.go b/server/config/config_test.go
index ce733c346..61801e9ca 100644
--- a/server/config/config_test.go
+++ b/server/config/config_test.go
@@ -23,3 +23,13 @@ func TestSetSnapshotDirectory(t *testing.T) {
cfg := DefaultConfig()
require.Equal(t, "", cfg.StateSync.SnapshotDirectory)
}
+
+func TestSetConcurrencyWorkers(t *testing.T) {
+ cfg := DefaultConfig()
+ require.Equal(t, DefaultConcurrencyWorkers, cfg.ConcurrencyWorkers)
+}
+
+func TestOCCEnabled(t *testing.T) {
+ cfg := DefaultConfig()
+ require.Equal(t, true, cfg.OccEnabled)
+}
diff --git a/server/config/toml.go b/server/config/toml.go
index 5c1e2b9f2..6e1addfc4 100644
--- a/server/config/toml.go
+++ b/server/config/toml.go
@@ -23,7 +23,7 @@ const DefaultConfigTemplate = `# This is a TOML config file.
# specified in this config (e.g. 0.25token1;0.0001token2).
minimum-gas-prices = "{{ .BaseConfig.MinGasPrices }}"
-# Pruning Strategies:
+# Pruning Strategies:
# - default: Keep the recent 362880 blocks and prune is triggered every 10 blocks
# - nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
# - everything: all saved states will be deleted, storing only the recent 2 blocks; pruning at every block
@@ -75,11 +75,11 @@ inter-block-cache = {{ .BaseConfig.InterBlockCache }}
# ["message.sender", "message.recipient"]
index-events = {{ .BaseConfig.IndexEvents }}
-# IavlCacheSize set the size of the iavl tree cache.
+# IavlCacheSize set the size of the iavl tree cache.
# Default cache size is 50mb.
iavl-cache-size = {{ .BaseConfig.IAVLCacheSize }}
-# IAVLDisableFastNode enables or disables the fast node feature of IAVL.
+# IAVLDisableFastNode enables or disables the fast node feature of IAVL.
# Default is true.
iavl-disable-fastnode = {{ .BaseConfig.IAVLDisableFastNode }}
@@ -104,6 +104,12 @@ num-orphan-per-file = {{ .BaseConfig.NumOrphanPerFile }}
# if separate-orphan-storage is true, where to store orphan data
orphan-dir = "{{ .BaseConfig.OrphanDirectory }}"
+# concurrency-workers defines how many workers to run for concurrent transaction execution
+concurrency-workers = {{ .BaseConfig.ConcurrencyWorkers }}
+
+# occ-enabled defines whether OCC is enabled or not for transaction execution
+occ-enabled = {{ .BaseConfig.OccEnabled }}
+
###############################################################################
### Telemetry Configuration ###
###############################################################################
diff --git a/server/mock/app.go b/server/mock/app.go
index 959bce012..5aacd477d 100644
--- a/server/mock/app.go
+++ b/server/mock/app.go
@@ -2,6 +2,7 @@ package mock
import (
"context"
+ "crypto/sha256"
"encoding/json"
"errors"
"fmt"
@@ -39,10 +40,15 @@ func NewApp(rootDir string, logger log.Logger) (abci.Application, error) {
baseApp.SetInitChainer(InitChainer(capKeyMainStore))
baseApp.SetFinalizeBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
txResults := []*abci.ExecTxResult{}
- for _, tx := range req.Txs {
+ for _, txbz := range req.Txs {
+ tx, err := decodeTx(txbz)
+ if err != nil {
+ txResults = append(txResults, &abci.ExecTxResult{})
+ continue
+ }
deliverTxResp := baseApp.DeliverTx(ctx, abci.RequestDeliverTx{
- Tx: tx,
- })
+ Tx: txbz,
+ }, tx, sha256.Sum256(txbz))
txResults = append(txResults, &abci.ExecTxResult{
Code: deliverTxResp.Code,
Data: deliverTxResp.Data,
diff --git a/server/mock/store.go b/server/mock/store.go
index 0a47cc08c..6096f35d4 100644
--- a/server/mock/store.go
+++ b/server/mock/store.go
@@ -226,6 +226,14 @@ func (kv kvStore) ReverseSubspaceIterator(prefix []byte) sdk.Iterator {
panic("not implemented")
}
+func (kv kvStore) VersionExists(version int64) bool {
+ panic("not implemented")
+}
+
+func (kv kvStore) DeleteAll(start, end []byte) error {
+ panic("not implemented")
+}
+
func NewCommitMultiStore() sdk.CommitMultiStore {
return multiStore{kv: make(map[sdk.StoreKey]kvStore)}
}
@@ -233,3 +241,11 @@ func NewCommitMultiStore() sdk.CommitMultiStore {
func (ms multiStore) Close() error {
return nil
}
+
+func (ms multiStore) SetKVStores(handler func(key store.StoreKey, s sdk.KVStore) store.CacheWrap) store.MultiStore {
+ panic("not implemented")
+}
+
+func (ms multiStore) StoreKeys() []sdk.StoreKey {
+ panic("not implemented")
+}
diff --git a/server/start.go b/server/start.go
index 14f4e9770..aedc274e4 100644
--- a/server/start.go
+++ b/server/start.go
@@ -70,6 +70,7 @@ const (
FlagSeparateOrphanVersionsToKeep = "separate-orphan-versions-to-keep"
FlagNumOrphanPerFile = "num-orphan-per-file"
FlagOrphanDirectory = "orphan-dir"
+ FlagConcurrencyWorkers = "concurrency-workers"
// state sync-related flags
FlagStateSyncSnapshotInterval = "state-sync.snapshot-interval"
@@ -252,6 +253,7 @@ is performed. Note, when enabled, gRPC will also be automatically enabled.
cmd.Flags().Int64(FlagSeparateOrphanVersionsToKeep, 2, "Number of versions to keep if storing orphans separately") cmd.Flags().Int(FlagNumOrphanPerFile, 100000, "Number of orphans to store on each file if storing orphans separately") cmd.Flags().String(FlagOrphanDirectory, path.Join(defaultNodeHome, "orphans"), "Directory to store orphan files if storing orphans separately") + cmd.Flags().Int(FlagConcurrencyWorkers, config.DefaultConcurrencyWorkers, "Number of workers to process concurrent transactions") cmd.Flags().Bool(flagGRPCOnly, false, "Start the node in gRPC query only mode (no Tendermint process is started)") cmd.Flags().Bool(flagGRPCEnable, true, "Define if the gRPC server should be enabled") diff --git a/simapp/app.go b/simapp/app.go index 4c0af924c..cb4b031c3 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -2,6 +2,7 @@ package simapp import ( "context" + "crypto/sha256" "encoding/json" "fmt" "io" @@ -515,9 +516,13 @@ func (app *SimApp) FinalizeBlocker(ctx sdk.Context, req *abci.RequestFinalizeBlo txResults := []*abci.ExecTxResult{} for i, tx := range req.Txs { ctx = ctx.WithContext(context.WithValue(ctx.Context(), ante.ContextKeyTxIndexKey, i)) + if typedTxs[i] == nil { + txResults = append(txResults, &abci.ExecTxResult{}) // empty result + continue + } deliverTxResp := app.DeliverTx(ctx, abci.RequestDeliverTx{ Tx: tx, - }) + }, typedTxs[i], sha256.Sum256(tx)) txResults = append(txResults, &abci.ExecTxResult{ Code: deliverTxResp.Code, Data: deliverTxResp.Data, diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index c84c0e835..4b3f5dfee 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -305,6 +305,7 @@ func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, t baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))), baseapp.SetIAVLDisableFastNode(cast.ToBool(appOpts.Get(server.FlagIAVLFastNode))), baseapp.SetCompactionInterval(cast.ToUint64(appOpts.Get(server.FlagCompactionInterval))), + baseapp.SetOccEnabled(cast.ToBool(appOpts.Get(baseapp.FlagOccEnabled))), ) } diff --git a/store/cachekv/mergeiterator.go b/store/cachekv/mergeiterator.go index f13c4025c..a32dfb346 100644 --- a/store/cachekv/mergeiterator.go +++ b/store/cachekv/mergeiterator.go @@ -16,11 +16,10 @@ import ( // // TODO: Optimize by memoizing. type cacheMergeIterator struct { - parent types.Iterator - cache types.Iterator - ascending bool - storeKey sdktypes.StoreKey - eventManager *sdktypes.EventManager + parent types.Iterator + cache types.Iterator + ascending bool + storeKey sdktypes.StoreKey } var _ types.Iterator = (*cacheMergeIterator)(nil) @@ -29,14 +28,12 @@ func NewCacheMergeIterator( parent, cache types.Iterator, ascending bool, storeKey sdktypes.StoreKey, - eventManager *sdktypes.EventManager, ) *cacheMergeIterator { iter := &cacheMergeIterator{ - parent: parent, - cache: cache, - ascending: ascending, - storeKey: storeKey, - eventManager: eventManager, + parent: parent, + cache: cache, + ascending: ascending, + storeKey: storeKey, } return iter @@ -138,14 +135,12 @@ func (iter *cacheMergeIterator) Value() []byte { // If parent is invalid, get the cache value. if !iter.parent.Valid() { value := iter.cache.Value() - iter.eventManager.EmitResourceAccessReadEvent("iterator", iter.storeKey, iter.cache.Key(), value) return value } // If cache is invalid, get the parent value. 
if !iter.cache.Valid() { value := iter.parent.Value() - iter.eventManager.EmitResourceAccessReadEvent("iterator", iter.storeKey, iter.parent.Key(), value) return value } @@ -156,11 +151,9 @@ func (iter *cacheMergeIterator) Value() []byte { switch cmp { case -1: // parent < cache value := iter.parent.Value() - iter.eventManager.EmitResourceAccessReadEvent("iterator", iter.storeKey, keyP, value) return value case 0, 1: // parent >= cache value := iter.cache.Value() - iter.eventManager.EmitResourceAccessReadEvent("iterator", iter.storeKey, keyC, value) return value default: panic("invalid comparison result") diff --git a/store/cachekv/mergeiterator_test.go b/store/cachekv/mergeiterator_test.go index 00f065151..b2648a865 100644 --- a/store/cachekv/mergeiterator_test.go +++ b/store/cachekv/mergeiterator_test.go @@ -6,7 +6,6 @@ import ( "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" "github.com/cosmos/cosmos-sdk/store/types" - sdktypes "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" ) @@ -14,7 +13,6 @@ import ( func TestMangerIterator(t *testing.T) { // initiate mock kvstore mem := dbadapter.Store{DB: dbm.NewMemDB()} - eventManager := sdktypes.NewEventManager() kvstore := cachekv.NewStore(mem, types.NewKVStoreKey("CacheKvTest"), types.DefaultCacheSizeLimit) value := randSlice(defaultValueSizeBz) startKey := randSlice(32) @@ -29,27 +27,13 @@ func TestMangerIterator(t *testing.T) { cache := kvstore.Iterator(nil, nil) for ; cache.Valid(); cache.Next() { } - iter := cachekv.NewCacheMergeIterator(parent, cache, true, types.NewKVStoreKey("CacheKvTest"), eventManager) + iter := cachekv.NewCacheMergeIterator(parent, cache, true, types.NewKVStoreKey("CacheKvTest")) - // get the next value - iter.Value() - - // assert the resource access is still emitted correctly when the cache store is unavailable - require.Equal(t, "access_type", string(eventManager.Events()[0].Attributes[0].Key)) - require.Equal(t, "read", string(eventManager.Events()[0].Attributes[0].Value)) - require.Equal(t, "store_key", string(eventManager.Events()[0].Attributes[1].Key)) - require.Equal(t, "CacheKvTest", string(eventManager.Events()[0].Attributes[1].Value)) - - // assert event emission when cache is available - cache = kvstore.Iterator(keys[1], keys[2]) - iter = cachekv.NewCacheMergeIterator(parent, cache, true, types.NewKVStoreKey("CacheKvTest"), eventManager) + // get the next value and it should not be nil + nextValue := iter.Value() + require.NotNil(t, nextValue) // get the next value - iter.Value() - - // assert the resource access is still emitted correctly when the cache store is available - require.Equal(t, "access_type", string(eventManager.Events()[0].Attributes[0].Key)) - require.Equal(t, "read", string(eventManager.Events()[0].Attributes[0].Value)) - require.Equal(t, "store_key", string(eventManager.Events()[0].Attributes[1].Key)) - require.Equal(t, "CacheKvTest", string(eventManager.Events()[0].Attributes[1].Value)) + nextValue = iter.Value() + require.NotNil(t, nextValue) } diff --git a/store/cachekv/store.go b/store/cachekv/store.go index f9dee6cbb..a33adf17c 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -56,8 +56,6 @@ func (store *Store) GetEvents() []abci.Event { // Implements Store func (store *Store) ResetEvents() { - store.mtx.Lock() - defer store.mtx.Unlock() store.eventManager = sdktypes.NewEventManager() } @@ -77,7 +75,6 @@ func (store *Store) getFromCache(key []byte) []byte { // 
Get implements types.KVStore. func (store *Store) Get(key []byte) (value []byte) { types.AssertValidKey(key) - store.eventManager.EmitResourceAccessReadEvent("get", store.storeKey, key, value) return store.getFromCache(key) } @@ -86,13 +83,11 @@ func (store *Store) Set(key []byte, value []byte) { types.AssertValidKey(key) types.AssertValidValue(value) store.setCacheValue(key, value, false, true) - store.eventManager.EmitResourceAccessWriteEvent("set", store.storeKey, key, value) } // Has implements types.KVStore. func (store *Store) Has(key []byte) bool { value := store.Get(key) - store.eventManager.EmitResourceAccessReadEvent("has", store.storeKey, key, value) return value != nil } @@ -194,7 +189,11 @@ func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { }() store.dirtyItems(start, end) cache = newMemIterator(start, end, store.sortedCache, store.deleted, ascending, store.eventManager, store.storeKey) - return NewCacheMergeIterator(parent, cache, ascending, store.storeKey, store.eventManager) + return NewCacheMergeIterator(parent, cache, ascending, store.storeKey) +} + +func (store *Store) VersionExists(version int64) bool { + return store.parent.VersionExists(version) } func findStartIndex(strL []string, startQ string) int { @@ -364,3 +363,22 @@ func (store *Store) isDeleted(key string) bool { _, ok := store.deleted.Load(key) return ok } + +func (store *Store) GetParent() types.KVStore { + return store.parent +} + +func (store *Store) DeleteAll(start, end []byte) error { + store.dirtyItems(start, end) + // memdb iterator + cachedIter, err := store.sortedCache.Iterator(start, end) + if err != nil { + return err + } + defer cachedIter.Close() + for ; cachedIter.Valid(); cachedIter.Next() { + // `Delete` would not touch sortedCache so it's okay to perform inside iterator + store.Delete(cachedIter.Key()) + } + return nil +} diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go index 3a6d5ab53..c6e7ab5b6 100644 --- a/store/cachekv/store_test.go +++ b/store/cachekv/store_test.go @@ -62,6 +62,19 @@ func TestCacheKVStore(t *testing.T) { st.Write() require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty") + + // GetParent returns parent store + require.NotNil(t, st.GetParent()) + + // DeleteAll deletes all entries in cache but not affect mem + st = cachekv.NewStore(mem, types.NewKVStoreKey("CacheKvTest"), types.DefaultCacheSizeLimit) + mem.Set(keyFmt(1), valFmt(1)) + st.Set(keyFmt(1), valFmt(2)) + st.Set(keyFmt(2), valFmt(3)) + require.Nil(t, st.DeleteAll(nil, nil)) + require.Nil(t, st.Get(keyFmt(1))) + require.Nil(t, st.Get(keyFmt(2))) + require.Equal(t, valFmt(1), mem.Get(keyFmt(1))) } func TestCacheKVStoreNoNilSet(t *testing.T) { diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go index 43e00c32b..96ce20dfc 100644 --- a/store/cachemulti/store.go +++ b/store/cachemulti/store.go @@ -208,3 +208,20 @@ func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { func (cms Store) GetWorkingHash() ([]byte, error) { panic("should never attempt to get working hash from cache multi store") } + +// StoreKeys returns a list of all store keys +func (cms Store) StoreKeys() []types.StoreKey { + keys := make([]types.StoreKey, 0, len(cms.stores)) + for _, key := range cms.keys { + keys = append(keys, key) + } + return keys +} + +// SetKVStores sets the underlying KVStores via a handler for each key +func (cms Store) SetKVStores(handler func(sk types.StoreKey, s 
types.KVStore) types.CacheWrap) types.MultiStore {
+ for k, s := range cms.stores {
+ cms.stores[k] = handler(k, s.(types.KVStore))
+ }
+ return cms
+}
diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go
index bf0c95364..b8de090f1 100644
--- a/store/dbadapter/store.go
+++ b/store/dbadapter/store.go
@@ -95,5 +95,22 @@ func (dsa Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []typ
return cachekv.NewStore(listenkv.NewStore(dsa, storeKey, listeners), storeKey, types.DefaultCacheSizeLimit)
}
+func (dsa Store) VersionExists(version int64) bool {
+ panic("no versioning for dbadapter")
+}
+
+func (dsa Store) DeleteAll(start, end []byte) error {
+ iter := dsa.Iterator(start, end)
+ keys := [][]byte{}
+ for ; iter.Valid(); iter.Next() {
+ keys = append(keys, iter.Key())
+ }
+ iter.Close()
+ for _, key := range keys {
+ dsa.Delete(key)
+ }
+ return nil
+}
+
// dbm.DB implements KVStore so we can CacheKVStore it.
var _ types.KVStore = Store{}
diff --git a/store/dbadapter/store_test.go b/store/dbadapter/store_test.go
index 467ba6421..f13209cbe 100644
--- a/store/dbadapter/store_test.go
+++ b/store/dbadapter/store_test.go
@@ -13,6 +13,7 @@ import (
"github.com/cosmos/cosmos-sdk/store/dbadapter"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/tests/mocks"
+ dbm "github.com/tendermint/tm-db"
)
var errFoo = errors.New("dummy")
@@ -74,6 +75,17 @@ func TestAccessors(t *testing.T) {
require.Panics(t, func() { store.ReverseIterator(start, end) })
}
+func TestDeleteAll(t *testing.T) {
+ mem := dbadapter.Store{DB: dbm.NewMemDB()}
+ mem.Set([]byte("1"), []byte("2"))
+ mem.Set([]byte("3"), []byte("4"))
+ require.NotNil(t, mem.Get([]byte("1")))
+ require.NotNil(t, mem.Get([]byte("3")))
+ require.Nil(t, mem.DeleteAll(nil, nil))
+ require.Nil(t, mem.Get([]byte("1")))
+ require.Nil(t, mem.Get([]byte("3")))
+}
+
func TestCacheWraps(t *testing.T) {
mockCtrl := gomock.NewController(t)
mockDB := mocks.NewMockDB(mockCtrl)
diff --git a/store/gaskv/store.go b/store/gaskv/store.go
index 0aa9f4282..62e3d89c4 100644
--- a/store/gaskv/store.go
+++ b/store/gaskv/store.go
@@ -126,6 +126,14 @@ func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator {
return gi
}
+func (gs *Store) VersionExists(version int64) bool {
+ return gs.parent.VersionExists(version)
+}
+
+func (gs *Store) DeleteAll(start, end []byte) error {
+ return gs.parent.DeleteAll(start, end)
+}
+
type gasIterator struct {
gasMeter types.GasMeter
gasConfig types.GasConfig
diff --git a/store/iavl/store.go b/store/iavl/store.go
index 3bcc9af97..2d5bfad88 100644
--- a/store/iavl/store.go
+++ b/store/iavl/store.go
@@ -423,6 +423,19 @@ func (st *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
return res
}
+func (st *Store) DeleteAll(start, end []byte) error {
+ iter := st.Iterator(start, end)
+ keys := [][]byte{}
+ for ; iter.Valid(); iter.Next() {
+ keys = append(keys, iter.Key())
+ }
+ iter.Close()
+ for _, key := range keys {
+ st.Delete(key)
+ }
+ return nil
+}
+
// Takes a MutableTree, a key, and a flag for creating existence or absence proof and returns the
// appropriate merkle.Proof.
Since this must be called after querying for the value, this function should never error // Thus, it will panic on error rather than returning it diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index 3cf0194e9..de56b4277 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -463,6 +463,21 @@ func TestIAVLNoPrune(t *testing.T) { } } +func TestIAVLStoreDeleteAll(t *testing.T) { + db := dbm.NewMemDB() + tree, err := iavl.NewMutableTree(db, cacheSize, false) + require.NoError(t, err) + + iavlStore := UnsafeNewStore(tree) + iavlStore.Set([]byte("1"), []byte("2")) + iavlStore.Set([]byte("3"), []byte("4")) + require.NotNil(t, iavlStore.Get([]byte("1"))) + require.NotNil(t, iavlStore.Get([]byte("3"))) + require.Nil(t, iavlStore.DeleteAll(nil, nil)) + require.Nil(t, iavlStore.Get([]byte("1"))) + require.Nil(t, iavlStore.Get([]byte("3"))) +} + func TestIAVLStoreQuery(t *testing.T) { db := dbm.NewMemDB() tree, err := iavl.NewMutableTree(db, cacheSize, false) diff --git a/store/listenkv/store.go b/store/listenkv/store.go index bf49b1282..20f103ec0 100644 --- a/store/listenkv/store.go +++ b/store/listenkv/store.go @@ -81,6 +81,10 @@ func (s *Store) iterator(start, end []byte, ascending bool) types.Iterator { return newTraceIterator(parent, s.listeners) } +func (s *Store) VersionExists(version int64) bool { + return s.parent.VersionExists(version) +} + type listenIterator struct { parent types.Iterator listeners []types.WriteListener @@ -157,3 +161,7 @@ func (s *Store) onWrite(delete bool, key, value []byte) { l.OnWrite(s.parentStoreKey, key, value, delete) } } + +func (s *Store) DeleteAll(start, end []byte) error { + return s.parent.DeleteAll(start, end) +} diff --git a/store/multiversion/data_structures.go b/store/multiversion/data_structures.go new file mode 100644 index 000000000..cba10d0f4 --- /dev/null +++ b/store/multiversion/data_structures.go @@ -0,0 +1,200 @@ +package multiversion + +import ( + "sync" + + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/google/btree" +) + +const ( + // The approximate number of items and children per B-tree node. Tuned with benchmarks. + multiVersionBTreeDegree = 2 // should be equivalent to a binary search tree TODO: benchmark this +) + +type MultiVersionValue interface { + GetLatest() (value MultiVersionValueItem, found bool) + GetLatestNonEstimate() (value MultiVersionValueItem, found bool) + GetLatestBeforeIndex(index int) (value MultiVersionValueItem, found bool) + Set(index int, incarnation int, value []byte) + SetEstimate(index int, incarnation int) + Delete(index int, incarnation int) + Remove(index int) +} + +type MultiVersionValueItem interface { + IsDeleted() bool + IsEstimate() bool + Value() []byte + Incarnation() int + Index() int +} + +type multiVersionItem struct { + valueTree *btree.BTree // contains versions values written to this key + mtx sync.RWMutex // manages read + write accesses +} + +var _ MultiVersionValue = (*multiVersionItem)(nil) + +func NewMultiVersionItem() *multiVersionItem { + return &multiVersionItem{ + valueTree: btree.New(multiVersionBTreeDegree), + } +} + +// GetLatest returns the latest written value to the btree, and returns a boolean indicating whether it was found. 
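+//
+// A minimal usage sketch (illustrative values only): writes are ordered by
+// transaction index, so a later index shadows an earlier one:
+//
+//	item := NewMultiVersionItem()
+//	item.Set(1, 0, []byte("a"))               // tx index 1 writes "a"
+//	item.Set(3, 0, []byte("b"))               // tx index 3 writes "b"
+//	latest, _ := item.GetLatest()             // index-3 item, value "b"
+//	prior, _ := item.GetLatestBeforeIndex(3)  // index-1 item, value "a"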
+func (item *multiVersionItem) GetLatest() (MultiVersionValueItem, bool) {
+ item.mtx.RLock()
+ defer item.mtx.RUnlock()
+
+ bTreeItem := item.valueTree.Max()
+ if bTreeItem == nil {
+ return nil, false
+ }
+ valueItem := bTreeItem.(*valueItem)
+ return valueItem, true
+}
+
+// GetLatestNonEstimate returns the latest written value that isn't an ESTIMATE and returns a boolean indicating whether it was found.
+// This can be used when we want to write finalized values, since ESTIMATEs can be considered to be irrelevant at that point
+func (item *multiVersionItem) GetLatestNonEstimate() (MultiVersionValueItem, bool) {
+ item.mtx.RLock()
+ defer item.mtx.RUnlock()
+
+ var vItem *valueItem
+ var found bool
+ item.valueTree.Descend(func(bTreeItem btree.Item) bool {
+ // only return if non-estimate
+ item := bTreeItem.(*valueItem)
+ if item.IsEstimate() {
+ // if estimate, continue
+ return true
+ }
+ // else we want to return
+ vItem = item
+ found = true
+ return false
+ })
+ return vItem, found
+}
+
+// GetLatestBeforeIndex returns the latest written value to the btree prior to the index passed in, and returns a boolean indicating whether it was found.
+//
+// A `nil` value along with `found=true` indicates a deletion that has occurred and the underlying parent store doesn't need to be hit.
+func (item *multiVersionItem) GetLatestBeforeIndex(index int) (MultiVersionValueItem, bool) {
+ item.mtx.RLock()
+ defer item.mtx.RUnlock()
+
+ // we want to find the value at the index that is LESS than the current index
+ pivot := &valueItem{index: index - 1}
+
+ var vItem *valueItem
+ var found bool
+ // start descending from the pivot (index - 1) and return on the first item we hit.
+ // This will ensure we get the latest indexed value relative to our current index
+ item.valueTree.DescendLessOrEqual(pivot, func(bTreeItem btree.Item) bool {
+ vItem = bTreeItem.(*valueItem)
+ found = true
+ return false
+ })
+ return vItem, found
+}
+
+func (item *multiVersionItem) Set(index int, incarnation int, value []byte) {
+ types.AssertValidValue(value)
+ item.mtx.Lock()
+ defer item.mtx.Unlock()
+
+ valueItem := NewValueItem(index, incarnation, value)
+ item.valueTree.ReplaceOrInsert(valueItem)
+}
+
+func (item *multiVersionItem) Delete(index int, incarnation int) {
+ item.mtx.Lock()
+ defer item.mtx.Unlock()
+
+ deletedItem := NewDeletedItem(index, incarnation)
+ item.valueTree.ReplaceOrInsert(deletedItem)
+}
+
+func (item *multiVersionItem) Remove(index int) {
+ item.mtx.Lock()
+ defer item.mtx.Unlock()
+
+ item.valueTree.Delete(&valueItem{index: index})
+}
+
+func (item *multiVersionItem) SetEstimate(index int, incarnation int) {
+ item.mtx.Lock()
+ defer item.mtx.Unlock()
+
+ estimateItem := NewEstimateItem(index, incarnation)
+ item.valueTree.ReplaceOrInsert(estimateItem)
+}
+
+type valueItem struct {
+ index int
+ incarnation int
+ value []byte
+ estimate bool
+}
+
+var _ MultiVersionValueItem = (*valueItem)(nil)
+
+// Index implements MultiVersionValueItem.
+func (v *valueItem) Index() int {
+ return v.index
+}
+
+// Incarnation implements MultiVersionValueItem.
+func (v *valueItem) Incarnation() int {
+ return v.incarnation
+}
+
+// IsDeleted implements MultiVersionValueItem.
+func (v *valueItem) IsDeleted() bool {
+ return v.value == nil && !v.estimate
+}
+
+// IsEstimate implements MultiVersionValueItem.
+func (v *valueItem) IsEstimate() bool {
+ return v.estimate
+}
+
+// Value implements MultiVersionValueItem.
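+// Note that Value is nil for both deletions and estimates; callers should
+// disambiguate with IsDeleted/IsEstimate rather than testing the raw value,
+// since the constructors below encode the three states as:
+//
+//	written:  value != nil, estimate == false
+//	deleted:  value == nil, estimate == false
+//	estimate: value == nil, estimate == true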
+func (v *valueItem) Value() []byte { + return v.value +} + +// implement Less for btree.Item for valueItem +func (i *valueItem) Less(other btree.Item) bool { + return i.index < other.(*valueItem).index +} + +func NewValueItem(index int, incarnation int, value []byte) *valueItem { + return &valueItem{ + index: index, + incarnation: incarnation, + value: value, + estimate: false, + } +} + +func NewEstimateItem(index int, incarnation int) *valueItem { + return &valueItem{ + index: index, + incarnation: incarnation, + value: nil, + estimate: true, + } +} + +func NewDeletedItem(index int, incarnation int) *valueItem { + return &valueItem{ + index: index, + incarnation: incarnation, + value: nil, + estimate: false, + } +} diff --git a/store/multiversion/data_structures_test.go b/store/multiversion/data_structures_test.go new file mode 100644 index 000000000..fccc26a8b --- /dev/null +++ b/store/multiversion/data_structures_test.go @@ -0,0 +1,228 @@ +package multiversion_test + +import ( + "testing" + + mv "github.com/cosmos/cosmos-sdk/store/multiversion" + "github.com/stretchr/testify/require" +) + +func TestMultiversionItemGetLatest(t *testing.T) { + mvItem := mv.NewMultiVersionItem() + // We have no value, should get found == false and a nil value + value, found := mvItem.GetLatest() + require.False(t, found) + require.Nil(t, value) + + // assert that we find a value after it's set + one := []byte("one") + mvItem.Set(1, 0, one) + value, found = mvItem.GetLatest() + require.True(t, found) + require.Equal(t, one, value.Value()) + + // assert that we STILL get the "one" value since it is the latest + zero := []byte("zero") + mvItem.Set(0, 0, zero) + value, found = mvItem.GetLatest() + require.True(t, found) + require.Equal(t, one, value.Value()) + require.Equal(t, 1, value.Index()) + require.Equal(t, 0, value.Incarnation()) + + // we should see a deletion as the latest now, aka nil value and found == true + mvItem.Delete(2, 0) + value, found = mvItem.GetLatest() + require.True(t, found) + require.True(t, value.IsDeleted()) + require.Nil(t, value.Value()) + + // Overwrite the deleted value with some data + two := []byte("two") + mvItem.Set(2, 3, two) + value, found = mvItem.GetLatest() + require.True(t, found) + require.Equal(t, two, value.Value()) + require.Equal(t, 2, value.Index()) + require.Equal(t, 3, value.Incarnation()) +} + +func TestMultiversionItemGetByIndex(t *testing.T) { + mvItem := mv.NewMultiVersionItem() + // We have no value, should get found == false and a nil value + value, found := mvItem.GetLatestBeforeIndex(9) + require.False(t, found) + require.Nil(t, value) + + // assert that we find a value after it's set + one := []byte("one") + mvItem.Set(1, 0, one) + // should not be found because we specifically search "LESS THAN" + value, found = mvItem.GetLatestBeforeIndex(1) + require.False(t, found) + require.Nil(t, value) + // querying from "two" should be found + value, found = mvItem.GetLatestBeforeIndex(2) + require.True(t, found) + require.Equal(t, one, value.Value()) + + // verify that querying for an earlier index returns nil + value, found = mvItem.GetLatestBeforeIndex(0) + require.False(t, found) + require.Nil(t, value) + + // assert that we STILL get the "one" value when querying with a later index + zero := []byte("zero") + mvItem.Set(0, 0, zero) + // verify that querying for zero should ALWAYS return nil + value, found = mvItem.GetLatestBeforeIndex(0) + require.False(t, found) + require.Nil(t, value) + + value, found = mvItem.GetLatestBeforeIndex(2) + require.True(t, 
found)
+ require.Equal(t, one, value.Value())
+ // verify we get zero when querying with index 1
+ value, found = mvItem.GetLatestBeforeIndex(1)
+ require.True(t, found)
+ require.Equal(t, zero, value.Value())
+
+ // we should see a deletion as the latest now, aka nil value and found == true, but index 4 still returns `one`
+ mvItem.Delete(4, 0)
+ value, found = mvItem.GetLatestBeforeIndex(4)
+ require.True(t, found)
+ require.Equal(t, one, value.Value())
+ // should get deletion item for a later index
+ value, found = mvItem.GetLatestBeforeIndex(5)
+ require.True(t, found)
+ require.True(t, value.IsDeleted())
+
+ // verify that we still read the proper underlying item for an older index
+ value, found = mvItem.GetLatestBeforeIndex(3)
+ require.True(t, found)
+ require.Equal(t, one, value.Value())
+
+ // Overwrite the deleted value with some data and verify we read it properly
+ four := []byte("four")
+ mvItem.Set(4, 0, four)
+ // also reads the four
+ value, found = mvItem.GetLatestBeforeIndex(6)
+ require.True(t, found)
+ require.Equal(t, four, value.Value())
+ // still reads the `one`
+ value, found = mvItem.GetLatestBeforeIndex(4)
+ require.True(t, found)
+ require.Equal(t, one, value.Value())
+}
+
+func TestMultiversionItemEstimate(t *testing.T) {
+ mvItem := mv.NewMultiVersionItem()
+ // We have no value, should get found == false and a nil value
+ value, found := mvItem.GetLatestBeforeIndex(9)
+ require.False(t, found)
+ require.Nil(t, value)
+
+ // assert that we find a value after it's set
+ one := []byte("one")
+ mvItem.Set(1, 0, one)
+ // should not be found because we specifically search "LESS THAN"
+ value, found = mvItem.GetLatestBeforeIndex(1)
+ require.False(t, found)
+ require.Nil(t, value)
+ // querying from "two" should be found
+ value, found = mvItem.GetLatestBeforeIndex(2)
+ require.True(t, found)
+ require.False(t, value.IsEstimate())
+ require.Equal(t, one, value.Value())
+ // set as estimate
+ mvItem.SetEstimate(1, 2)
+ // should not be found because we specifically search "LESS THAN"
+ value, found = mvItem.GetLatestBeforeIndex(1)
+ require.False(t, found)
+ require.Nil(t, value)
+ // querying from "two" should be found as ESTIMATE
+ value, found = mvItem.GetLatestBeforeIndex(2)
+ require.True(t, found)
+ require.True(t, value.IsEstimate())
+ require.Equal(t, 1, value.Index())
+ require.Equal(t, 2, value.Incarnation())
+
+ // verify that querying for an earlier index returns nil
+ value, found = mvItem.GetLatestBeforeIndex(0)
+ require.False(t, found)
+ require.Nil(t, value)
+
+ // assert that we STILL get the "one" value when querying with a later index
+ zero := []byte("zero")
+ mvItem.Set(0, 0, zero)
+ // verify that querying for zero should ALWAYS return nil
+ value, found = mvItem.GetLatestBeforeIndex(0)
+ require.False(t, found)
+ require.Nil(t, value)
+
+ value, found = mvItem.GetLatestBeforeIndex(2)
+ require.True(t, found)
+ require.True(t, value.IsEstimate())
+ // verify we get zero when querying with index 1
+ value, found = mvItem.GetLatestBeforeIndex(1)
+ require.True(t, found)
+ require.Equal(t, zero, value.Value())
+ // reset one to no longer be an estimate
+ mvItem.Set(1, 0, one)
+ // we should see a deletion as the latest now, aka nil value and found == true, but index 4 still returns `one`
+ mvItem.Delete(4, 1)
+ value, found = mvItem.GetLatestBeforeIndex(4)
+ require.True(t, found)
+ require.Equal(t, one, value.Value())
+ // should get deletion item for a later index
+ value, found = mvItem.GetLatestBeforeIndex(5)
+ require.True(t, found)
+
require.True(t, value.IsDeleted()) + require.Equal(t, 4, value.Index()) + require.Equal(t, 1, value.Incarnation()) + + // verify that we still read the proper underlying item for an older index + value, found = mvItem.GetLatestBeforeIndex(3) + require.True(t, found) + require.Equal(t, one, value.Value()) + + // Overwrite the deleted value with an estimate and verify we read it properly + mvItem.SetEstimate(4, 0) + // also reads the four + value, found = mvItem.GetLatestBeforeIndex(6) + require.True(t, found) + require.True(t, value.IsEstimate()) + require.False(t, value.IsDeleted()) + // still reads the `one` + value, found = mvItem.GetLatestBeforeIndex(4) + require.True(t, found) + require.Equal(t, one, value.Value()) +} + +func TestMultiversionItemRemove(t *testing.T) { + mvItem := mv.NewMultiVersionItem() + + mvItem.Set(1, 0, []byte("one")) + mvItem.Set(2, 0, []byte("two")) + + mvItem.Remove(2) + value, found := mvItem.GetLatest() + require.True(t, found) + require.Equal(t, []byte("one"), value.Value()) +} + +func TestMultiversionItemGetLatestNonEstimate(t *testing.T) { + mvItem := mv.NewMultiVersionItem() + + mvItem.SetEstimate(3, 0) + + value, found := mvItem.GetLatestNonEstimate() + require.False(t, found) + require.Nil(t, value) + + mvItem.Set(1, 0, []byte("one")) + value, found = mvItem.GetLatestNonEstimate() + require.True(t, found) + require.Equal(t, []byte("one"), value.Value()) + +} diff --git a/store/multiversion/memiterator.go b/store/multiversion/memiterator.go new file mode 100644 index 000000000..32cb257b8 --- /dev/null +++ b/store/multiversion/memiterator.go @@ -0,0 +1,116 @@ +package multiversion + +import ( + dbm "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/store/types" + occtypes "github.com/cosmos/cosmos-sdk/types/occ" +) + +// Iterates over iterKVCache items. +// if key is nil, means it was deleted. +// Implements Iterator. 
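+//
+// The memDB iterated here is the scratch key set assembled by
+// VersionIndexedStore.iterator in mvkv.go (writeset, readset and
+// multiversion-store keys); Value re-resolves every key through the store's
+// Get, so the usual writeset -> readset -> multiversion store -> parent
+// precedence applies.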
+type memIterator struct {
+ types.Iterator
+ mvkv *VersionIndexedStore
+}
+
+func (store *VersionIndexedStore) newMemIterator(
+ start, end []byte,
+ items *dbm.MemDB,
+ ascending bool,
+) *memIterator {
+ var iter types.Iterator
+ var err error
+
+ if ascending {
+ iter, err = items.Iterator(start, end)
+ } else {
+ iter, err = items.ReverseIterator(start, end)
+ }
+
+ if err != nil {
+ if iter != nil {
+ iter.Close()
+ }
+ panic(err)
+ }
+
+ return &memIterator{
+ Iterator: iter,
+ mvkv: store,
+ }
+}
+
+// try to get value from the writeset, otherwise try to get from multiversion store, otherwise try to get from parent
+func (mi *memIterator) Value() []byte {
+ key := mi.Iterator.Key()
+ // TODO: verify that this is correct
+ return mi.mvkv.Get(key)
+}
+
+type validationIterator struct {
+ types.Iterator
+
+ mvStore MultiVersionStore
+ writeset WriteSet
+ index int
+ abortChannel chan occtypes.Abort
+}
+
+func (store *Store) newMVSValidationIterator(
+ index int,
+ start, end []byte,
+ items *dbm.MemDB,
+ ascending bool,
+ writeset WriteSet,
+ abortChannel chan occtypes.Abort,
+) *validationIterator {
+ var iter types.Iterator
+ var err error
+
+ if ascending {
+ iter, err = items.Iterator(start, end)
+ } else {
+ iter, err = items.ReverseIterator(start, end)
+ }
+
+ if err != nil {
+ if iter != nil {
+ iter.Close()
+ }
+ panic(err)
+ }
+
+ return &validationIterator{
+ Iterator: iter,
+ mvStore: store,
+ index: index,
+ abortChannel: abortChannel,
+ writeset: writeset,
+ }
+}
+
+// try to get value from the writeset, otherwise try to get from multiversion store, otherwise try to get from parent iterator
+func (vi *validationIterator) Value() []byte {
+ key := vi.Iterator.Key()
+
+ // try fetch from writeset - return if exists
+ if val, ok := vi.writeset[string(key)]; ok {
+ return val
+ }
+
+ // get the value from the multiversion store
+ val := vi.mvStore.GetLatestBeforeIndex(vi.index, key)
+
+ // if we have an estimate, write to abort channel
+ if val.IsEstimate() {
+ vi.abortChannel <- occtypes.NewEstimateAbort(val.Index())
+ }
+
+ // if we have a deleted value, return nil
+ if val.IsDeleted() {
+ return nil
+ }
+ return val.Value()
+}
diff --git a/store/multiversion/mergeiterator.go b/store/multiversion/mergeiterator.go
new file mode 100644
index 000000000..1e398cf94
--- /dev/null
+++ b/store/multiversion/mergeiterator.go
@@ -0,0 +1,263 @@
+package multiversion
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/cosmos/cosmos-sdk/store/types"
+)
+
+// mvsMergeIterator merges a parent Iterator and a cache Iterator.
+// The cache iterator may return nil keys to signal that an item
+// had been deleted (but not deleted in the parent).
+// If the cache iterator has the same key as the parent, the
+// cache shadows (overrides) the parent.
+type mvsMergeIterator struct {
+ parent types.Iterator
+ cache types.Iterator
+ ascending bool
+ ReadsetHandler
+}
+
+var _ types.Iterator = (*mvsMergeIterator)(nil)
+
+func NewMVSMergeIterator(
+ parent, cache types.Iterator,
+ ascending bool,
+ readsetHandler ReadsetHandler,
+) *mvsMergeIterator {
+ iter := &mvsMergeIterator{
+ parent: parent,
+ cache: cache,
+ ascending: ascending,
+ ReadsetHandler: readsetHandler,
+ }
+
+ return iter
+}
+
+// Domain implements Iterator.
+// It returns the union of the iter.Parent domain, and the iter.Cache domain.
+// If the domains are disjoint, this includes the domain in between them as well.
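+//
+// For example, with ascending iterators, parent domain [a, c) and cache
+// domain [b, d) merge to [a, d); disjoint domains [a, b) and [e, f) merge
+// to [a, f), covering the gap between them.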
+func (iter *mvsMergeIterator) Domain() (start, end []byte) {
+ startP, endP := iter.parent.Domain()
+ startC, endC := iter.cache.Domain()
+
+ if iter.compare(startP, startC) < 0 {
+ start = startP
+ } else {
+ start = startC
+ }
+
+ if iter.compare(endP, endC) < 0 {
+ end = endC
+ } else {
+ end = endP
+ }
+
+ return start, end
+}
+
+// Valid implements Iterator.
+func (iter *mvsMergeIterator) Valid() bool {
+ return iter.skipUntilExistsOrInvalid()
+}
+
+// Next implements Iterator
+func (iter *mvsMergeIterator) Next() {
+ iter.skipUntilExistsOrInvalid()
+ iter.assertValid()
+
+ // If parent is invalid, get the next cache item.
+ if !iter.parent.Valid() {
+ iter.cache.Next()
+ return
+ }
+
+ // If cache is invalid, get the next parent item.
+ if !iter.cache.Valid() {
+ iter.parent.Next()
+ return
+ }
+
+ // Both are valid. Compare keys.
+ keyP, keyC := iter.parent.Key(), iter.cache.Key()
+ switch iter.compare(keyP, keyC) {
+ case -1: // parent < cache
+ iter.parent.Next()
+ case 0: // parent == cache
+ iter.parent.Next()
+ iter.cache.Next()
+ case 1: // parent > cache
+ iter.cache.Next()
+ }
+}
+
+// Key implements Iterator
+func (iter *mvsMergeIterator) Key() []byte {
+ iter.skipUntilExistsOrInvalid()
+ iter.assertValid()
+
+ // If parent is invalid, get the cache key.
+ if !iter.parent.Valid() {
+ return iter.cache.Key()
+ }
+
+ // If cache is invalid, get the parent key.
+ if !iter.cache.Valid() {
+ return iter.parent.Key()
+ }
+
+ // Both are valid. Compare keys.
+ keyP, keyC := iter.parent.Key(), iter.cache.Key()
+
+ cmp := iter.compare(keyP, keyC)
+ switch cmp {
+ case -1: // parent < cache
+ return keyP
+ case 0: // parent == cache
+ return keyP
+ case 1: // parent > cache
+ return keyC
+ default:
+ panic("invalid compare result")
+ }
+}
+
+// Value implements Iterator
+func (iter *mvsMergeIterator) Value() []byte {
+ iter.skipUntilExistsOrInvalid()
+ iter.assertValid()
+
+ // If parent is invalid, get the cache value.
+ if !iter.parent.Valid() {
+ value := iter.cache.Value()
+ return value
+ }
+
+ // If cache is invalid, get the parent value.
+ if !iter.cache.Valid() {
+ value := iter.parent.Value()
+ // add values read from parent to readset
+ iter.ReadsetHandler.UpdateReadSet(iter.parent.Key(), value)
+ return value
+ }
+
+ // Both are valid. Compare keys.
+ keyP, keyC := iter.parent.Key(), iter.cache.Key()
+
+ cmp := iter.compare(keyP, keyC)
+ switch cmp {
+ case -1: // parent < cache
+ value := iter.parent.Value()
+ // add values read from parent to readset
+ iter.ReadsetHandler.UpdateReadSet(iter.parent.Key(), value)
+ return value
+ case 0, 1: // parent >= cache
+ value := iter.cache.Value()
+ return value
+ default:
+ panic("invalid comparison result")
+ }
+}
+
+// Close implements Iterator
+func (iter *mvsMergeIterator) Close() error {
+ if err := iter.parent.Close(); err != nil {
+ // still want to close cache iterator regardless
+ iter.cache.Close()
+ return err
+ }
+
+ return iter.cache.Close()
+}
+
+// Error returns an error if the mvsMergeIterator is invalid, as defined by the
+// Valid method.
+func (iter *mvsMergeIterator) Error() error {
+ if !iter.Valid() {
+ return errors.New("invalid mvsMergeIterator")
+ }
+
+ return nil
+}
+
+// If not valid, panics.
+// NOTE: May have side-effect of iterating over cache.
+func (iter *mvsMergeIterator) assertValid() {
+ if err := iter.Error(); err != nil {
+ panic(err)
+ }
+}
+
+// Like bytes.Compare but opposite if not ascending.
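+// For instance, compare([]byte{1}, []byte{2}) returns -1 when ascending and
+// 1 when descending.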
+func (iter *mvsMergeIterator) compare(a, b []byte) int {
+ if iter.ascending {
+ return bytes.Compare(a, b)
+ }
+
+ return bytes.Compare(a, b) * -1
+}
+
+// Skip all delete-items from the cache w/ `key < until`. After this function,
+// current cache item is a non-delete-item, or `until <= key`.
+// If the current cache item is not a delete item, does nothing.
+// If `until` is nil, there is no limit, and cache may end up invalid.
+// CONTRACT: cache is valid.
+func (iter *mvsMergeIterator) skipCacheDeletes(until []byte) {
+ for iter.cache.Valid() &&
+ iter.cache.Value() == nil &&
+ (until == nil || iter.compare(iter.cache.Key(), until) < 0) {
+ iter.cache.Next()
+ }
+}
+
+// Fast forwards cache (or parent+cache in case of deleted items) until current
+// item exists, or until iterator becomes invalid.
+// Returns whether the iterator is valid.
+func (iter *mvsMergeIterator) skipUntilExistsOrInvalid() bool {
+ for {
+ // If parent is invalid, fast-forward cache.
+ if !iter.parent.Valid() {
+ iter.skipCacheDeletes(nil)
+ return iter.cache.Valid()
+ }
+ // Parent is valid.
+ if !iter.cache.Valid() {
+ return true
+ }
+ // Parent is valid, cache is valid.
+
+ // Compare parent and cache.
+ keyP := iter.parent.Key()
+ keyC := iter.cache.Key()
+
+ switch iter.compare(keyP, keyC) {
+ case -1: // parent < cache.
+ return true
+
+ case 0: // parent == cache.
+ // Skip over if cache item is a delete.
+ valueC := iter.cache.Value()
+ if valueC == nil {
+ iter.parent.Next()
+ iter.cache.Next()
+
+ continue
+ }
+ // Cache is not a delete.
+
+ return true // cache exists.
+ case 1: // cache < parent
+ // Skip over if cache item is a delete.
+ valueC := iter.cache.Value()
+ if valueC == nil {
+ iter.skipCacheDeletes(keyP)
+ continue
+ }
+ // Cache is not a delete.
+
+ return true // cache exists.
+ }
+ }
+}
diff --git a/store/multiversion/mvkv.go b/store/multiversion/mvkv.go
new file mode 100644
index 000000000..22f5d9456
--- /dev/null
+++ b/store/multiversion/mvkv.go
@@ -0,0 +1,391 @@
+package multiversion
+
+import (
+ "io"
+ "sort"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/store/types"
+ scheduler "github.com/cosmos/cosmos-sdk/types/occ"
+ dbm "github.com/tendermint/tm-db"
+)
+
+// exposes a handler for adding items to readset, useful for iterators
+type ReadsetHandler interface {
+ UpdateReadSet(key []byte, value []byte)
+}
+
+type NoOpHandler struct{}
+
+func (NoOpHandler) UpdateReadSet(key []byte, value []byte) {}
+
+// exposes a handler for adding items to iterateset, to be called upon iterator close
+type IterateSetHandler interface {
+ UpdateIterateSet(iterationTracker)
+}
+
+type iterationTracker struct {
+ startKey []byte // start of the iteration range
+ endKey []byte // end of the iteration range
+ earlyStopKey []byte // key that caused early stop
+ iteratedKeys map[string]struct{} // TODO: is a map okay because the ordering will be enforced when we replay the iterator?
+ ascending bool
+
+ writeset WriteSet
+
+ // TODO: is it possible that termination is affected by keys later in iteration that weren't reached? e.g. number of keys affecting iteration?
+ // TODO: I believe to get the number of keys the iteration would need to be done fully, so it's not a concern?
+
+ // TODO: maybe we need to store keys served from writeset for the transaction? that way if there are OTHER keys within the writeset and the iteration range that were written to the writeset later, we can discriminate between the groups?
+ // keysServedFromWriteset map[string]struct{}
+
+ // actually it's simpler to just store a copy of the writeset at the time of iterator creation
+}
+
+func NewIterationTracker(startKey, endKey []byte, ascending bool, writeset WriteSet) iterationTracker {
+ copyWriteset := make(WriteSet, len(writeset))
+
+ for key, value := range writeset {
+ copyWriteset[key] = value
+ }
+
+ return iterationTracker{
+ startKey: startKey,
+ endKey: endKey,
+ iteratedKeys: make(map[string]struct{}),
+ ascending: ascending,
+ writeset: copyWriteset,
+ }
+}
+
+func (item *iterationTracker) AddKey(key []byte) {
+ item.iteratedKeys[string(key)] = struct{}{}
+}
+
+func (item *iterationTracker) SetEarlyStopKey(key []byte) {
+ item.earlyStopKey = key
+}
+
+// VersionIndexedStore wraps the multiversion store to implement the KVStore interface; it also stores the index of the transaction, so store actions are applied to the multiversion store using that index
+type VersionIndexedStore struct {
+ // TODO: this shouldn't NEED a mutex because it's used within single transaction execution, therefore no concurrency
+ // mtx sync.Mutex
+ // used for tracking reads and writes for eventual validation + persistence into multi-version store
+ // TODO: does this need sync.Map?
+ readset map[string][]byte // contains the key -> value mapping for all keys read from the store (not mvkv, underlying store)
+ writeset map[string][]byte // contains the key -> value mapping for all keys written to the store
+ iterateset Iterateset
+ // TODO: need to add iterateset here as well
+
+ // used for iterators - populated at the time of iterator instantiation
+ // TODO: when we want to perform iteration, we need to move all the dirty keys (writeset and readset) into the sortedTree and then combine with the iterators for the underlying stores
+ sortedStore *dbm.MemDB // always ascending sorted
+ // parent stores (both multiversion and underlying parent store)
+ multiVersionStore MultiVersionStore
+ parent types.KVStore
+ // transaction metadata for versioned operations
+ transactionIndex int
+ incarnation int
+ // have abort channel here for aborting transactions
+ abortChannel chan scheduler.Abort
+}
+
+var _ types.KVStore = (*VersionIndexedStore)(nil)
+var _ ReadsetHandler = (*VersionIndexedStore)(nil)
+var _ IterateSetHandler = (*VersionIndexedStore)(nil)
+
+func NewVersionIndexedStore(parent types.KVStore, multiVersionStore MultiVersionStore, transactionIndex, incarnation int, abortChannel chan scheduler.Abort) *VersionIndexedStore {
+ return &VersionIndexedStore{
+ readset: make(map[string][]byte),
+ writeset: make(map[string][]byte),
+ iterateset: []iterationTracker{},
+ sortedStore: dbm.NewMemDB(),
+ parent: parent,
+ multiVersionStore: multiVersionStore,
+ transactionIndex: transactionIndex,
+ incarnation: incarnation,
+ abortChannel: abortChannel,
+ }
+}
+
+// GetReadset returns the readset
+func (store *VersionIndexedStore) GetReadset() map[string][]byte {
+ return store.readset
+}
+
+// GetWriteset returns the writeset
+func (store *VersionIndexedStore) GetWriteset() map[string][]byte {
+ return store.writeset
+}
+
+// Get implements types.KVStore.
+func (store *VersionIndexedStore) Get(key []byte) []byte {
+ // first try the writeset cache; on a cache miss, try the multiversion store; if that misses too, get from the parent store
+ // if the key is in the cache, return it
+
+ // don't have RW mutex because we have to update readset
+ // TODO: remove?
+ // store.mtx.Lock()
+ // defer store.mtx.Unlock()
+ // defer telemetry.MeasureSince(time.Now(), "store", "mvkv", "get")
+
+ types.AssertValidKey(key)
+ strKey := string(key)
+ // first check the MVKV writeset, and return that value if present
+ cacheValue, ok := store.writeset[strKey]
+ if ok {
+ // return the value from the cache, no need to update any readset stuff
+ return cacheValue
+ }
+ // read the readset to see if the value exists - and return if applicable
+ if readsetVal, ok := store.readset[strKey]; ok {
+ return readsetVal
+ }
+
+ // if we didn't find it, then we want to check the multiversion store + add to readset if applicable
+ mvsValue := store.multiVersionStore.GetLatestBeforeIndex(store.transactionIndex, key)
+ if mvsValue != nil {
+ if mvsValue.IsEstimate() {
+ store.abortChannel <- scheduler.NewEstimateAbort(mvsValue.Index())
+ return nil
+ } else {
+ // This handles both detecting readset conflicts and updating readset if applicable
+ return store.parseValueAndUpdateReadset(strKey, mvsValue)
+ }
+ }
+ // if we didn't find it in the multiversion store, then we want to check the parent store + add to readset
+ parentValue := store.parent.Get(key)
+ store.UpdateReadSet(key, parentValue)
+ return parentValue
+}
+
+// This function handles reads with deleted items and values and verifies that the data is consistent with what we currently have in the readset (IF we have a readset value for that key)
+func (store *VersionIndexedStore) parseValueAndUpdateReadset(strKey string, mvsValue MultiVersionValueItem) []byte {
+ value := mvsValue.Value()
+ if mvsValue.IsDeleted() {
+ value = nil
+ }
+ store.UpdateReadSet([]byte(strKey), value)
+ return value
+}
+
+// This function iterates over the readset, validating that the values in the readset are consistent with the values in the multiversion store and underlying parent store, and returns a boolean indicating validity
+func (store *VersionIndexedStore) ValidateReadset() bool {
+ // TODO: remove?
+ // store.mtx.Lock()
+ // defer store.mtx.Unlock()
+ // defer telemetry.MeasureSince(time.Now(), "store", "mvkv", "validate_readset")
+
+ // sort the readset keys - this is so we have consistent behavior when there are varying conflicts within the readset (e.g. read conflict vs estimate)
+ readsetKeys := make([]string, 0, len(store.readset))
+ for key := range store.readset {
+ readsetKeys = append(readsetKeys, key)
+ }
+ sort.Strings(readsetKeys)
+
+ // iterate over readset keys and values
+ for _, strKey := range readsetKeys {
+ key := []byte(strKey)
+ value := store.readset[strKey]
+ mvsValue := store.multiVersionStore.GetLatestBeforeIndex(store.transactionIndex, key)
+ if mvsValue != nil {
+ if mvsValue.IsEstimate() {
+ // if we see an estimate, that means that we need to abort and rerun
+ store.abortChannel <- scheduler.NewEstimateAbort(mvsValue.Index())
+ return false
+ } else {
+ if mvsValue.IsDeleted() {
+ // check for `nil`
+ if value != nil {
+ return false
+ }
+ } else {
+ // check for equality
+ if string(value) != string(mvsValue.Value()) {
+ return false
+ }
+ }
+ }
+ continue // value is valid, continue to next key
+ }
+
+ parentValue := store.parent.Get(key)
+ if string(parentValue) != string(value) {
+ // this shouldn't happen because any conflict should always surface within the multiversion store
+ panic("we shouldn't ever have a readset conflict in parent store")
+ }
+ // value was correct, we can continue to the next value
+ }
+ return true
+}
+
+// Delete implements types.KVStore.
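+// Deletes are recorded as nil writeset entries (via setValue below). A minimal
+// sketch of the intended behavior, assuming a store `vis` built with
+// NewVersionIndexedStore:
+//
+//	vis.Set([]byte("k"), []byte("v"))
+//	vis.Delete([]byte("k"))
+//	vis.Get([]byte("k")) // nil, served straight from the writeset
+//
+// The nil entry is what WriteToMultiVersionStore later persists as a deletion
+// for this transaction index.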
+func (store *VersionIndexedStore) Delete(key []byte) { + // TODO: remove? + // store.mtx.Lock() + // defer store.mtx.Unlock() + // defer telemetry.MeasureSince(time.Now(), "store", "mvkv", "delete") + + types.AssertValidKey(key) + store.setValue(key, nil) +} + +// Has implements types.KVStore. +func (store *VersionIndexedStore) Has(key []byte) bool { + // necessary locking happens within store.Get + return store.Get(key) != nil +} + +// Set implements types.KVStore. +func (store *VersionIndexedStore) Set(key []byte, value []byte) { + // TODO: remove? + // store.mtx.Lock() + // defer store.mtx.Unlock() + // defer telemetry.MeasureSince(time.Now(), "store", "mvkv", "set") + + types.AssertValidKey(key) + store.setValue(key, value) +} + +// Iterator implements types.KVStore. +func (v *VersionIndexedStore) Iterator(start []byte, end []byte) dbm.Iterator { + return v.iterator(start, end, true) +} + +// ReverseIterator implements types.KVStore. +func (v *VersionIndexedStore) ReverseIterator(start []byte, end []byte) dbm.Iterator { + return v.iterator(start, end, false) +} + +// TODO: still needs iterateset tracking +// Iterator implements types.KVStore. +func (store *VersionIndexedStore) iterator(start []byte, end []byte, ascending bool) dbm.Iterator { + // TODO: remove? + // store.mtx.Lock() + // defer store.mtx.Unlock() + + // get the sorted keys from MVS + // TODO: ideally we take advantage of mvs keys already being sorted + // TODO: ideally merge btree and mvs keys into a single sorted btree + memDB := store.multiVersionStore.CollectIteratorItems(store.transactionIndex) + + // TODO: ideally we persist writeset keys into a sorted btree for later use + // make a set of total keys across mvkv and mvs to iterate + for key := range store.writeset { + memDB.Set([]byte(key), []byte{}) + } + // also add readset elements such that they fetch from readset instead of parent + for key := range store.readset { + memDB.Set([]byte(key), []byte{}) + } + + var parent, memIterator types.Iterator + + // make a memIterator + memIterator = store.newMemIterator(start, end, memDB, ascending) + + if ascending { + parent = store.parent.Iterator(start, end) + } else { + parent = store.parent.ReverseIterator(start, end) + } + + mergeIterator := NewMVSMergeIterator(parent, memIterator, ascending, store) + + iterationTracker := NewIterationTracker(start, end, ascending, store.writeset) + trackedIterator := NewTrackedIterator(mergeIterator, iterationTracker, store, store) + + // mergeIterator + return trackedIterator + +} + +func (v *VersionIndexedStore) VersionExists(version int64) bool { + return v.parent.VersionExists(version) +} + +func (v *VersionIndexedStore) DeleteAll(start, end []byte) error { + return v.parent.DeleteAll(start, end) +} + +// GetStoreType implements types.KVStore. +func (v *VersionIndexedStore) GetStoreType() types.StoreType { + return v.parent.GetStoreType() +} + +// CacheWrap implements types.KVStore. +func (*VersionIndexedStore) CacheWrap(storeKey types.StoreKey) types.CacheWrap { + panic("CacheWrap not supported for version indexed store") +} + +// CacheWrapWithListeners implements types.KVStore. +func (*VersionIndexedStore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { + panic("CacheWrapWithListeners not supported for version indexed store") +} + +// CacheWrapWithTrace implements types.KVStore. 
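+// (A VersionIndexedStore is per-transaction scratch state, so nesting another
+// cache layer on top of it appears to be deliberately unsupported; the same
+// reasoning presumably applies to CacheWrap and CacheWrapWithListeners above.)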
+func (*VersionIndexedStore) CacheWrapWithTrace(storeKey types.StoreKey, w io.Writer, tc types.TraceContext) types.CacheWrap { + panic("CacheWrapWithTrace not supported for version indexed store") +} + +// GetWorkingHash implements types.KVStore. +func (v *VersionIndexedStore) GetWorkingHash() ([]byte, error) { + panic("should never attempt to get working hash from version indexed store") +} + +// Only entrypoint to mutate writeset +func (store *VersionIndexedStore) setValue(key, value []byte) { + types.AssertValidKey(key) + + keyStr := string(key) + store.writeset[keyStr] = value +} + +func (store *VersionIndexedStore) WriteToMultiVersionStore() { + // TODO: remove? + // store.mtx.Lock() + // defer store.mtx.Unlock() + // defer telemetry.MeasureSince(time.Now(), "store", "mvkv", "write_mvs") + store.multiVersionStore.SetWriteset(store.transactionIndex, store.incarnation, store.writeset) + store.multiVersionStore.SetReadset(store.transactionIndex, store.readset) + store.multiVersionStore.SetIterateset(store.transactionIndex, store.iterateset) +} + +func (store *VersionIndexedStore) WriteEstimatesToMultiVersionStore() { + // TODO: remove? + // store.mtx.Lock() + // defer store.mtx.Unlock() + // defer telemetry.MeasureSince(time.Now(), "store", "mvkv", "write_mvs") + store.multiVersionStore.SetEstimatedWriteset(store.transactionIndex, store.incarnation, store.writeset) + // TODO: do we need to write readset and iterateset in this case? I don't think so since if this is called it means we aren't doing validation +} + +func (store *VersionIndexedStore) UpdateReadSet(key []byte, value []byte) { + // add to readset + keyStr := string(key) + // TODO: maybe only add if not already existing? + if _, ok := store.readset[keyStr]; !ok { + store.readset[keyStr] = value + } +} + +// Write implements types.CacheWrap so this store can exist on the cache multi store +func (store *VersionIndexedStore) Write() { + panic("not implemented") +} + +// GetEvents implements types.CacheWrap so this store can exist on the cache multi store +func (store *VersionIndexedStore) GetEvents() []abci.Event { + panic("not implemented") +} + +// ResetEvents implements types.CacheWrap so this store can exist on the cache multi store +func (store *VersionIndexedStore) ResetEvents() { + panic("not implemented") +} + +func (store *VersionIndexedStore) UpdateIterateSet(iterationTracker iterationTracker) { + // append to iterateset + store.iterateset = append(store.iterateset, iterationTracker) +} diff --git a/store/multiversion/mvkv_test.go b/store/multiversion/mvkv_test.go new file mode 100644 index 000000000..008a7fa61 --- /dev/null +++ b/store/multiversion/mvkv_test.go @@ -0,0 +1,391 @@ +package multiversion_test + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/store/cachekv" + "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/cosmos/cosmos-sdk/store/multiversion" + "github.com/cosmos/cosmos-sdk/store/types" + scheduler "github.com/cosmos/cosmos-sdk/types/occ" + "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" +) + +func TestVersionIndexedStoreGetters(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000) + mvs := multiversion.NewMultiVersionStore(parentKVStore) + // initialize a new VersionIndexedStore + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 1, 2, make(chan scheduler.Abort)) + + // mock a value in the parent store + parentKVStore.Set([]byte("key1"), []byte("value1")) + + // 
+	val := vis.Get([]byte("key2"))
+	require.Nil(t, val)
+	require.False(t, vis.Has([]byte("key2")))
+
+	// read key that falls down to parent store
+	val2 := vis.Get([]byte("key1"))
+	require.Equal(t, []byte("value1"), val2)
+	require.True(t, vis.Has([]byte("key1")))
+	// verify value now in readset
+	require.Equal(t, []byte("value1"), vis.GetReadset()["key1"])
+
+	// read the same key that should now be served from the readset (can be verified by setting a different value for the key in the parent store)
+	parentKVStore.Set([]byte("key1"), []byte("value2")) // realistically shouldn't happen, modifying to verify readset access
+	val3 := vis.Get([]byte("key1"))
+	require.True(t, vis.Has([]byte("key1")))
+	require.Equal(t, []byte("value1"), val3)
+
+	// test deleted value written to MVS but not parent store
+	mvs.SetWriteset(0, 2, map[string][]byte{
+		"delKey": nil,
+	})
+	parentKVStore.Set([]byte("delKey"), []byte("value4"))
+	valDel := vis.Get([]byte("delKey"))
+	require.Nil(t, valDel)
+	require.False(t, vis.Has([]byte("delKey")))
+
+	// set different key in MVS - for various indices
+	mvs.SetWriteset(0, 2, map[string][]byte{
+		"delKey": nil,
+		"key3":   []byte("value3"),
+	})
+	mvs.SetWriteset(2, 1, map[string][]byte{
+		"key3": []byte("value4"),
+	})
+	mvs.SetEstimatedWriteset(5, 0, map[string][]byte{
+		"key3": nil,
+	})
+
+	// read the key that falls down to MVS
+	val4 := vis.Get([]byte("key3"))
+	// should equal value3 because the later write (value4 at tx index 2) is not visible to this reader at tx index 1
+	require.Equal(t, []byte("value3"), val4)
+	require.True(t, vis.Has([]byte("key3")))
+
+	// try a read that falls through to MVS with a later tx index
+	vis2 := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 3, 2, make(chan scheduler.Abort))
+	val5 := vis2.Get([]byte("key3"))
+	// should equal value4 because the write at tx index 2 IS visible to this reader at tx index 3
+	require.Equal(t, []byte("value4"), val5)
+	require.True(t, vis2.Has([]byte("key3")))
+
+	// test estimate values writing to abortChannel
+	abortChannel := make(chan scheduler.Abort)
+	vis3 := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 6, 2, abortChannel)
+	go func() {
+		vis3.Get([]byte("key3"))
+	}()
+	abort := <-abortChannel // read the abort from the channel
+	require.Equal(t, 5, abort.DependentTxIdx)
+	require.Equal(t, scheduler.ErrReadEstimate, abort.Err)
+
+	vis.Set([]byte("key4"), []byte("value4"))
+	// verify proper response for GET
+	val6 := vis.Get([]byte("key4"))
+	require.True(t, vis.Has([]byte("key4")))
+	require.Equal(t, []byte("value4"), val6)
+	// verify that it's in the writeset
+	require.Equal(t, []byte("value4"), vis.GetWriteset()["key4"])
+	// verify that it's not in the readset
+	require.Nil(t, vis.GetReadset()["key4"])
+}
+
+func TestVersionIndexedStoreSetters(t *testing.T) {
+	mem := dbadapter.Store{DB: dbm.NewMemDB()}
+	parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000)
+	mvs := multiversion.NewMultiVersionStore(parentKVStore)
+	// initialize a new VersionIndexedStore
+	vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 1, 2, make(chan scheduler.Abort))
+
+	// test simple set
+	vis.Set([]byte("key1"), []byte("value1"))
+	require.Equal(t, []byte("value1"), vis.GetWriteset()["key1"])
+
+	mvs.SetWriteset(0, 1, map[string][]byte{
+		"key2": []byte("value2"),
+	})
+	vis.Delete([]byte("key2"))
+	require.Nil(t, vis.Get([]byte("key2")))
+	// because the delete should be at the writeset level, we should not have populated the readset
+	require.Zero(t, len(vis.GetReadset()))
+
+	// try 
setting the value again, and then read + vis.Set([]byte("key2"), []byte("value3")) + require.Equal(t, []byte("value3"), vis.Get([]byte("key2"))) + require.Zero(t, len(vis.GetReadset())) +} + +func TestVersionIndexedStoreBoilerplateFunctions(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000) + mvs := multiversion.NewMultiVersionStore(parentKVStore) + // initialize a new VersionIndexedStore + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 1, 2, make(chan scheduler.Abort)) + + // asserts panics where appropriate + require.Panics(t, func() { vis.CacheWrap(types.NewKVStoreKey("mock")) }) + require.Panics(t, func() { vis.CacheWrapWithListeners(types.NewKVStoreKey("mock"), nil) }) + require.Panics(t, func() { vis.CacheWrapWithTrace(types.NewKVStoreKey("mock"), nil, nil) }) + require.Panics(t, func() { vis.GetWorkingHash() }) + + // assert properly returns store type + require.Equal(t, types.StoreTypeDB, vis.GetStoreType()) +} + +func TestVersionIndexedStoreWrite(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000) + mvs := multiversion.NewMultiVersionStore(parentKVStore) + // initialize a new VersionIndexedStore + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 1, 2, make(chan scheduler.Abort)) + + mvs.SetWriteset(0, 1, map[string][]byte{ + "key3": []byte("value3"), + }) + + require.False(t, mvs.Has(3, []byte("key1"))) + require.False(t, mvs.Has(3, []byte("key2"))) + require.True(t, mvs.Has(3, []byte("key3"))) + + // write some keys + vis.Set([]byte("key1"), []byte("value1")) + vis.Set([]byte("key2"), []byte("value2")) + vis.Delete([]byte("key3")) + + vis.WriteToMultiVersionStore() + + require.Equal(t, []byte("value1"), mvs.GetLatest([]byte("key1")).Value()) + require.Equal(t, []byte("value2"), mvs.GetLatest([]byte("key2")).Value()) + require.True(t, mvs.GetLatest([]byte("key3")).IsDeleted()) +} + +func TestVersionIndexedStoreWriteEstimates(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000) + mvs := multiversion.NewMultiVersionStore(parentKVStore) + // initialize a new VersionIndexedStore + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 1, 2, make(chan scheduler.Abort)) + + mvs.SetWriteset(0, 1, map[string][]byte{ + "key3": []byte("value3"), + }) + + require.False(t, mvs.Has(3, []byte("key1"))) + require.False(t, mvs.Has(3, []byte("key2"))) + require.True(t, mvs.Has(3, []byte("key3"))) + + // write some keys + vis.Set([]byte("key1"), []byte("value1")) + vis.Set([]byte("key2"), []byte("value2")) + vis.Delete([]byte("key3")) + + vis.WriteEstimatesToMultiVersionStore() + + require.True(t, mvs.GetLatest([]byte("key1")).IsEstimate()) + require.True(t, mvs.GetLatest([]byte("key2")).IsEstimate()) + require.True(t, mvs.GetLatest([]byte("key3")).IsEstimate()) +} + +func TestVersionIndexedStoreValidation(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000) + mvs := multiversion.NewMultiVersionStore(parentKVStore) + // initialize a new VersionIndexedStore + abortC := make(chan scheduler.Abort) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 2, 2, abortC) + // set some initial values + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + 
parentKVStore.Set([]byte("deletedKey"), []byte("foo")) + + mvs.SetWriteset(0, 1, map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + "deletedKey": nil, + }) + + // load those into readset + vis.Get([]byte("key1")) + vis.Get([]byte("key2")) + vis.Get([]byte("key4")) + vis.Get([]byte("key5")) + vis.Get([]byte("keyDNE")) + vis.Get([]byte("deletedKey")) + + // everything checks out, so we should be able to validate successfully + require.True(t, vis.ValidateReadset()) + // modify underlying transaction key that is unrelated + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + }) + // should still have valid readset + require.True(t, vis.ValidateReadset()) + + // modify underlying transaction key that is related + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + "key1": []byte("value1_b"), + }) + // should now have invalid readset + require.False(t, vis.ValidateReadset()) + // reset so readset is valid again + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + "key1": []byte("value1"), + }) + require.True(t, vis.ValidateReadset()) + + // mvs has a value that was initially read from parent + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + "key1": []byte("value1"), + "key4": []byte("value4_b"), + }) + require.False(t, vis.ValidateReadset()) + // reset key + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + "key1": []byte("value1"), + "key4": []byte("value4"), + }) + require.True(t, vis.ValidateReadset()) + + // mvs has a value that was initially read from parent - BUT in a later tx index + mvs.SetWriteset(4, 2, map[string][]byte{ + "key4": []byte("value4_c"), + }) + // readset should remain valid + require.True(t, vis.ValidateReadset()) + + // mvs has an estimate + mvs.SetEstimatedWriteset(1, 1, map[string][]byte{ + "key2": nil, + }) + // readset should be invalid now - but via abort channel write + go func() { + vis.ValidateReadset() + }() + abort := <-abortC // read the abort from the channel + require.Equal(t, 1, abort.DependentTxIdx) + + // test key deleted later + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + "key1": []byte("value1"), + "key4": []byte("value4"), + "key2": nil, + }) + require.False(t, vis.ValidateReadset()) + // reset key2 + mvs.SetWriteset(1, 1, map[string][]byte{ + "key3": []byte("value3"), + "key1": []byte("value1"), + "key4": []byte("value4"), + "key2": []byte("value2"), + }) + + // lastly verify panic if parent kvstore has a conflict - this shouldn't happen but lets assert that it would panic + parentKVStore.Set([]byte("keyDNE"), []byte("foobar")) + require.Equal(t, []byte("foobar"), parentKVStore.Get([]byte("keyDNE"))) + require.Panics(t, func() { + vis.ValidateReadset() + }) +} + +func TestIterator(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + parentKVStore := cachekv.NewStore(mem, types.NewKVStoreKey("mock"), 1000) + mvs := multiversion.NewMultiVersionStore(parentKVStore) + // initialize a new VersionIndexedStore + abortC := make(chan scheduler.Abort) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 2, 2, abortC) + + // set some initial values + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + parentKVStore.Set([]byte("deletedKey"), []byte("foo")) + mvs.SetWriteset(0, 1, map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + "deletedKey": nil, + }) + // add an estimate to MVS + 
mvs.SetEstimatedWriteset(3, 1, map[string][]byte{ + "key3": []byte("value1_b"), + }) + + // iterate over the keys - exclusive on key5 + iter := vis.Iterator([]byte("000"), []byte("key5")) + + // verify domain is superset + start, end := iter.Domain() + require.Equal(t, []byte("000"), start) + require.Equal(t, []byte("key5"), end) + + vals := []string{} + defer iter.Close() + for ; iter.Valid(); iter.Next() { + vals = append(vals, string(iter.Value())) + } + require.Equal(t, []string{"value1", "value2", "value4"}, vals) + iter.Close() + + // test reverse iteration + vals2 := []string{} + iter2 := vis.ReverseIterator([]byte("000"), []byte("key6")) + defer iter2.Close() + for ; iter2.Valid(); iter2.Next() { + vals2 = append(vals2, string(iter2.Value())) + } + // has value5 because of end being key6 + require.Equal(t, []string{"value5", "value4", "value2", "value1"}, vals2) + iter2.Close() + + // add items to writeset + vis.Set([]byte("key3"), []byte("value3")) + vis.Set([]byte("key4"), []byte("valueNew")) + + // iterate over the keys - exclusive on key5 + iter3 := vis.Iterator([]byte("000"), []byte("key5")) + vals3 := []string{} + defer iter3.Close() + for ; iter3.Valid(); iter3.Next() { + vals3 = append(vals3, string(iter3.Value())) + } + require.Equal(t, []string{"value1", "value2", "value3", "valueNew"}, vals3) + iter3.Close() + + vis.Set([]byte("key6"), []byte("value6")) + // iterate over the keys, writeset being the last of the iteration range + iter4 := vis.Iterator([]byte("000"), []byte("key7")) + vals4 := []string{} + defer iter4.Close() + for ; iter4.Valid(); iter4.Next() { + vals4 = append(vals4, string(iter4.Value())) + } + require.Equal(t, []string{"value1", "value2", "value3", "valueNew", "value5", "value6"}, vals4) + iter4.Close() + + // add an estimate to MVS + mvs.SetEstimatedWriteset(1, 1, map[string][]byte{ + "key2": []byte("value1_b"), + }) + // need to reset readset + abortC2 := make(chan scheduler.Abort) + visNew := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 2, 3, abortC2) + go func() { + // new iter + iter4 := visNew.Iterator([]byte("000"), []byte("key5")) + defer iter4.Close() + for ; iter4.Valid(); iter4.Next() { + } + }() + abort := <-abortC2 // read the abort from the channel + require.Equal(t, 1, abort.DependentTxIdx) + +} diff --git a/store/multiversion/store.go b/store/multiversion/store.go new file mode 100644 index 000000000..eae1175d5 --- /dev/null +++ b/store/multiversion/store.go @@ -0,0 +1,427 @@ +package multiversion + +import ( + "bytes" + "sort" + "sync" + + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/types/occ" + occtypes "github.com/cosmos/cosmos-sdk/types/occ" + db "github.com/tendermint/tm-db" +) + +type MultiVersionStore interface { + GetLatest(key []byte) (value MultiVersionValueItem) + GetLatestBeforeIndex(index int, key []byte) (value MultiVersionValueItem) + Has(index int, key []byte) bool + WriteLatestToStore() + SetWriteset(index int, incarnation int, writeset WriteSet) + InvalidateWriteset(index int, incarnation int) + SetEstimatedWriteset(index int, incarnation int, writeset WriteSet) + GetAllWritesetKeys() map[int][]string + CollectIteratorItems(index int) *db.MemDB + SetReadset(index int, readset ReadSet) + GetReadset(index int) ReadSet + ClearReadset(index int) + VersionedIndexedStore(index int, incarnation int, abortChannel chan occ.Abort) *VersionIndexedStore + SetIterateset(index int, iterateset Iterateset) + GetIterateset(index int) Iterateset + ClearIterateset(index int) + 
ValidateTransactionState(index int) (bool, []int)
+}
+
+type WriteSet map[string][]byte
+type ReadSet map[string][]byte
+type Iterateset []iterationTracker
+
+var _ MultiVersionStore = (*Store)(nil)
+
+type Store struct {
+	// map that stores the key string -> MultiVersionValue mapping for accessing from a given key
+	multiVersionMap *sync.Map
+	// TODO: do we need to support iterators as well similar to how cachekv does it - yes
+
+	txWritesetKeys *sync.Map // map of tx index -> writeset keys []string
+	txReadSets     *sync.Map // map of tx index -> readset ReadSet
+	txIterateSets  *sync.Map // map of tx index -> iterateset Iterateset
+
+	parentStore types.KVStore
+}
+
+func NewMultiVersionStore(parentStore types.KVStore) *Store {
+	return &Store{
+		multiVersionMap: &sync.Map{},
+		txWritesetKeys:  &sync.Map{},
+		txReadSets:      &sync.Map{},
+		txIterateSets:   &sync.Map{},
+		parentStore:     parentStore,
+	}
+}
+
+// VersionedIndexedStore creates a new versioned index store for a given incarnation and transaction index
+func (s *Store) VersionedIndexedStore(index int, incarnation int, abortChannel chan occ.Abort) *VersionIndexedStore {
+	return NewVersionIndexedStore(s.parentStore, s, index, incarnation, abortChannel)
+}
+
+// GetLatest implements MultiVersionStore.
+func (s *Store) GetLatest(key []byte) (value MultiVersionValueItem) {
+	keyString := string(key)
+	mvVal, found := s.multiVersionMap.Load(keyString)
+	// if the key doesn't exist in the overall map, return nil
+	if !found {
+		return nil
+	}
+	latestVal, found := mvVal.(MultiVersionValue).GetLatest()
+	if !found {
+		return nil // this is possible if a writeset previously existed for the key but has since been removed
+	}
+	return latestVal
+}
+
+// GetLatestBeforeIndex implements MultiVersionStore.
+func (s *Store) GetLatestBeforeIndex(index int, key []byte) (value MultiVersionValueItem) {
+	keyString := string(key)
+	mvVal, found := s.multiVersionMap.Load(keyString)
+	// if the key doesn't exist in the overall map, return nil
+	if !found {
+		return nil
+	}
+	val, found := mvVal.(MultiVersionValue).GetLatestBeforeIndex(index)
+	// we may have found values for that key, but none written before the index passed in
+	if !found {
+		return nil
+	}
+	// found a value prior to the passed-in index, return that value (could be estimate OR deleted, but it is a definitive value)
+	return val
+}
+
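GetLatestBeforeIndex is the core lookup of the whole scheme: each key keeps its writes ordered by tx index, and a reader at index i sees the greatest write strictly below i. A self-contained sketch of that lookup (illustrative only; the real MultiVersionValue also tracks incarnations, estimates, and deletions):

package main

import (
	"fmt"
	"sort"
)

type versionedEntry struct {
	txIndex int
	value   []byte
}

// versionList keeps entries sorted ascending by txIndex.
type versionList struct {
	entries []versionedEntry
}

// getLatestBeforeIndex returns the latest value written strictly before index.
func (vl *versionList) getLatestBeforeIndex(index int) ([]byte, bool) {
	// first position whose txIndex is >= index
	i := sort.Search(len(vl.entries), func(i int) bool {
		return vl.entries[i].txIndex >= index
	})
	if i == 0 {
		return nil, false // nothing written before this tx index
	}
	return vl.entries[i-1].value, true
}

func main() {
	vl := &versionList{entries: []versionedEntry{
		{txIndex: 0, value: []byte("value3")},
		{txIndex: 2, value: []byte("value4")},
	}}
	v, _ := vl.getLatestBeforeIndex(1)
	fmt.Println(string(v)) // value3: a reader at index 1 ignores the later write at index 2
}

+// Has implements MultiVersionStore. It reports whether a value exists for the key written before (exclusive of) the specified index.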
+func (s *Store) Has(index int, key []byte) bool {
+	keyString := string(key)
+	mvVal, found := s.multiVersionMap.Load(keyString)
+	// if the key doesn't exist in the overall map, return false
+	if !found {
+		return false // this is okay because the caller will then need to check the parent store to verify that the key doesn't exist there
+	}
+	_, foundVal := mvVal.(MultiVersionValue).GetLatestBeforeIndex(index)
+	return foundVal
+}
+
+func (s *Store) removeOldWriteset(index int, newWriteSet WriteSet) {
+	writeset := make(map[string][]byte)
+	if newWriteSet != nil {
+		// if non-nil writeset passed in, we can use that to optimize removals
+		writeset = newWriteSet
+	}
+	// if there is already a writeset existing, we should remove that fully
+	oldKeys, loaded := s.txWritesetKeys.LoadAndDelete(index)
+	if loaded {
+		keys := oldKeys.([]string)
+		// we need to delete all of the keys in the writeset from the multiversion store
+		for _, key := range keys {
+			// small optimization: if the new writeset is going to write this key anyway, we can leave it behind
+			if _, ok := writeset[key]; ok {
+				// we don't need to remove this key because it will be overwritten anyways - saves the operation of removing + rebalancing underlying btree
+				continue
+			}
+			// remove from the appropriate item if present in multiVersionMap
+			mvVal, found := s.multiVersionMap.Load(key)
+			// if the key doesn't exist in the overall map, skip it
+			if !found {
+				continue
+			}
+			mvVal.(MultiVersionValue).Remove(index)
+		}
+	}
+}
+
+// SetWriteset sets a writeset for a transaction index, and also writes all of the multiversion items in the writeset to the multiversion store.
+// TODO: return a list of NEW keys added
+func (s *Store) SetWriteset(index int, incarnation int, writeset WriteSet) {
+	// TODO: add telemetry spans
+	// remove old writeset if it exists
+	s.removeOldWriteset(index, writeset)
+
+	writeSetKeys := make([]string, 0, len(writeset))
+	for key, value := range writeset {
+		writeSetKeys = append(writeSetKeys, key)
+		loadVal, _ := s.multiVersionMap.LoadOrStore(key, NewMultiVersionItem()) // init if necessary
+		mvVal := loadVal.(MultiVersionValue)
+		if value == nil {
+			// delete if nil value
+			// TODO: sync map
+			mvVal.Delete(index, incarnation)
+		} else {
+			mvVal.Set(index, incarnation, value)
+		}
+	}
+	sort.Strings(writeSetKeys) // TODO: if we're sorting here anyways, maybe we just put it into a btree instead of a slice
+	s.txWritesetKeys.Store(index, writeSetKeys)
+}
+
+// InvalidateWriteset iterates over the keys for the given index and incarnation writeset and replaces with ESTIMATEs
+func (s *Store) InvalidateWriteset(index int, incarnation int) {
+	keysAny, found := s.txWritesetKeys.Load(index)
+	if !found {
+		return
+	}
+	keys := keysAny.([]string)
+	for _, key := range keys {
+		// invalidate all of the writeset items - is this suboptimal? 
- we could potentially do concurrently if slow because locking is on an item specific level + val, _ := s.multiVersionMap.LoadOrStore(key, NewMultiVersionItem()) + val.(MultiVersionValue).SetEstimate(index, incarnation) + } + // we leave the writeset in place because we'll need it for key removal later if/when we replace with a new writeset +} + +// SetEstimatedWriteset is used to directly write estimates instead of writing a writeset and later invalidating +func (s *Store) SetEstimatedWriteset(index int, incarnation int, writeset WriteSet) { + // remove old writeset if it exists + s.removeOldWriteset(index, writeset) + + writeSetKeys := make([]string, 0, len(writeset)) + // still need to save the writeset so we can remove the elements later: + for key := range writeset { + writeSetKeys = append(writeSetKeys, key) + + mvVal, _ := s.multiVersionMap.LoadOrStore(key, NewMultiVersionItem()) // init if necessary + mvVal.(MultiVersionValue).SetEstimate(index, incarnation) + } + sort.Strings(writeSetKeys) + s.txWritesetKeys.Store(index, writeSetKeys) +} + +// GetAllWritesetKeys implements MultiVersionStore. +func (s *Store) GetAllWritesetKeys() map[int][]string { + writesetKeys := make(map[int][]string) + // TODO: is this safe? + s.txWritesetKeys.Range(func(key, value interface{}) bool { + index := key.(int) + keys := value.([]string) + writesetKeys[index] = keys + return true + }) + + return writesetKeys +} + +func (s *Store) SetReadset(index int, readset ReadSet) { + s.txReadSets.Store(index, readset) +} + +func (s *Store) GetReadset(index int) ReadSet { + readsetAny, found := s.txReadSets.Load(index) + if !found { + return nil + } + return readsetAny.(ReadSet) +} + +func (s *Store) SetIterateset(index int, iterateset Iterateset) { + s.txIterateSets.Store(index, iterateset) +} + +func (s *Store) GetIterateset(index int) Iterateset { + iteratesetAny, found := s.txIterateSets.Load(index) + if !found { + return nil + } + return iteratesetAny.(Iterateset) +} + +func (s *Store) ClearReadset(index int) { + s.txReadSets.Delete(index) +} + +func (s *Store) ClearIterateset(index int) { + s.txIterateSets.Delete(index) +} + +// CollectIteratorItems implements MultiVersionStore. It will return a memDB containing all of the keys present in the multiversion store within the iteration range prior to (exclusive of) the index. +func (s *Store) CollectIteratorItems(index int) *db.MemDB { + sortedItems := db.NewMemDB() + + // get all writeset keys prior to index + for i := 0; i < index; i++ { + writesetAny, found := s.txWritesetKeys.Load(i) + if !found { + continue + } + indexedWriteset := writesetAny.([]string) + // TODO: do we want to exclude keys out of the range or just let the iterator handle it? + for _, key := range indexedWriteset { + // TODO: inefficient because (logn) for each key + rebalancing? 
maybe there's a better way to add to a tree to reduce rebalancing overhead
+			sortedItems.Set([]byte(key), []byte{})
+		}
+	}
+	return sortedItems
+}
+
+func (s *Store) validateIterator(index int, tracker iterationTracker) bool {
+	// collect items from multiversion store
+	sortedItems := s.CollectIteratorItems(index)
+	// add the iterationtracker writeset keys to the sorted items
+	for key := range tracker.writeset {
+		sortedItems.Set([]byte(key), []byte{})
+	}
+	validChannel := make(chan bool, 1)
+	abortChannel := make(chan occtypes.Abort, 1)
+
+	// listen for abort while iterating
+	go func(iterationTracker iterationTracker, items *db.MemDB, returnChan chan bool, abortChan chan occtypes.Abort) {
+		var parentIter types.Iterator
+		expectedKeys := iterationTracker.iteratedKeys
+		foundKeys := 0
+		iter := s.newMVSValidationIterator(index, iterationTracker.startKey, iterationTracker.endKey, items, iterationTracker.ascending, iterationTracker.writeset, abortChan)
+		if iterationTracker.ascending {
+			parentIter = s.parentStore.Iterator(iterationTracker.startKey, iterationTracker.endKey)
+		} else {
+			parentIter = s.parentStore.ReverseIterator(iterationTracker.startKey, iterationTracker.endKey)
+		}
+		// create a new MVSMergeIterator
+		mergeIterator := NewMVSMergeIterator(parentIter, iter, iterationTracker.ascending, NoOpHandler{})
+		defer mergeIterator.Close()
+		for ; mergeIterator.Valid(); mergeIterator.Next() {
+			if (len(expectedKeys) - foundKeys) == 0 {
+				// the merged iterator still has keys but we expected no more, so the iterator is invalid
+				returnChan <- false
+				return
+			}
+			key := mergeIterator.Key()
+			// TODO: is this ok to not delete the key since we shouldn't have duplicate keys?
+			if _, ok := expectedKeys[string(key)]; !ok {
+				// if key isn't found
+				returnChan <- false
+				return
+			}
+			// count the found key instead of removing it from expected keys
+			foundKeys += 1
+			// delete(expectedKeys, string(key))
+
+			// if our iterator key was the early stop, then we can break
+			if bytes.Equal(key, iterationTracker.earlyStopKey) {
+				returnChan <- true
+				return
+			}
+		}
+		returnChan <- !((len(expectedKeys) - foundKeys) > 0)
+	}(tracker, sortedItems, validChannel, abortChannel)
+	select {
+	case <-abortChannel:
+		// if we get an abort, then we know that the iterator is invalid
+		return false
+	case valid := <-validChannel:
+		return valid
+	}
+}
+
+func (s *Store) checkIteratorAtIndex(index int) bool {
+	valid := true
+	iterateSetAny, found := s.txIterateSets.Load(index)
+	if !found {
+		return true
+	}
+	iterateset := iterateSetAny.(Iterateset)
+	for _, iterationTracker := range iterateset {
+		iteratorValid := s.validateIterator(index, iterationTracker)
+		valid = valid && iteratorValid
+	}
+	return valid
+}
+
+func (s *Store) checkReadsetAtIndex(index int) (bool, []int) {
+	conflictSet := make(map[int]struct{})
+	valid := true
+
+	readSetAny, found := s.txReadSets.Load(index)
+	if !found {
+		return true, []int{}
+	}
+	readset := readSetAny.(ReadSet)
+	// iterate over readset and check if the value is the same as the latest value relative to txIndex in the multiversion store
+	for key, value := range readset {
+		// get the latest value from the multiversion store
+		latestValue := s.GetLatestBeforeIndex(index, []byte(key))
+		if latestValue == nil {
+			// this is possible if we previously read a value from a transaction write that was later reverted, so this time we read from parent store
+			parentVal := s.parentStore.Get([]byte(key))
+			if !bytes.Equal(parentVal, value) {
+				valid = false
+			}
+		} else {
+			// if estimate, mark as conflict index - but don't invalidate
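+			// (an ESTIMATE means the tx at that index wrote this key and has since been
+			// invalidated, pending re-execution; instead of failing validation outright we
+			// record that index as a dependency the scheduler can wait on before re-validating)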
+ if latestValue.IsEstimate() { + conflictSet[latestValue.Index()] = struct{}{} + } else if latestValue.IsDeleted() { + if value != nil { + // conflict + // TODO: would we want to return early? + valid = false + } + } else if !bytes.Equal(latestValue.Value(), value) { + valid = false + } + } + } + + conflictIndices := make([]int, 0, len(conflictSet)) + for index := range conflictSet { + conflictIndices = append(conflictIndices, index) + } + + sort.Ints(conflictIndices) + + return valid, conflictIndices +} + +// TODO: do we want to return bool + []int where bool indicates whether it was valid and then []int indicates only ones for which we need to wait due to estimates? - yes i think so? +func (s *Store) ValidateTransactionState(index int) (bool, []int) { + // defer telemetry.MeasureSince(time.Now(), "store", "mvs", "validate") + + // TODO: can we parallelize for all iterators? + iteratorValid := s.checkIteratorAtIndex(index) + + readsetValid, conflictIndices := s.checkReadsetAtIndex(index) + + return iteratorValid && readsetValid, conflictIndices +} + +func (s *Store) WriteLatestToStore() { + // sort the keys + keys := []string{} + s.multiVersionMap.Range(func(key, value interface{}) bool { + keys = append(keys, key.(string)) + return true + }) + sort.Strings(keys) + + for _, key := range keys { + val, ok := s.multiVersionMap.Load(key) + if !ok { + continue + } + mvValue, found := val.(MultiVersionValue).GetLatestNonEstimate() + if !found { + // this means that at some point, there was an estimate, but we have since removed it so there isn't anything writeable at the key, so we can skip + continue + } + // we shouldn't have any ESTIMATE values when performing the write, because we read the latest non-estimate values only + if mvValue.IsEstimate() { + panic("should not have any estimate values when writing to parent store") + } + // if the value is deleted, then delete it from the parent store + if mvValue.IsDeleted() { + // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot + // be sure if the underlying store might do a save with the byteslice or + // not. Once we get confirmation that .Delete is guaranteed not to + // save the byteslice, then we can assume only a read-only copy is sufficient. 
+ s.parentStore.Delete([]byte(key)) + continue + } + if mvValue.Value() != nil { + s.parentStore.Set([]byte(key), mvValue.Value()) + } + } +} diff --git a/store/multiversion/store_test.go b/store/multiversion/store_test.go new file mode 100644 index 000000000..ae0f3afda --- /dev/null +++ b/store/multiversion/store_test.go @@ -0,0 +1,642 @@ +package multiversion_test + +import ( + "bytes" + "testing" + + "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/cosmos/cosmos-sdk/store/multiversion" + "github.com/cosmos/cosmos-sdk/types/occ" + "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" +) + +func TestMultiVersionStore(t *testing.T) { + store := multiversion.NewMultiVersionStore(nil) + + // Test Set and GetLatest + store.SetWriteset(1, 1, map[string][]byte{ + "key1": []byte("value1"), + }) + store.SetWriteset(2, 1, map[string][]byte{ + "key1": []byte("value2"), + }) + store.SetWriteset(3, 1, map[string][]byte{ + "key2": []byte("value3"), + }) + + require.Equal(t, []byte("value2"), store.GetLatest([]byte("key1")).Value()) + require.Equal(t, []byte("value3"), store.GetLatest([]byte("key2")).Value()) + + // Test SetEstimate + store.SetEstimatedWriteset(4, 1, map[string][]byte{ + "key1": nil, + }) + require.True(t, store.GetLatest([]byte("key1")).IsEstimate()) + + // Test Delete + store.SetWriteset(5, 1, map[string][]byte{ + "key1": nil, + }) + require.True(t, store.GetLatest([]byte("key1")).IsDeleted()) + + // Test GetLatestBeforeIndex + store.SetWriteset(6, 1, map[string][]byte{ + "key1": []byte("value4"), + }) + require.True(t, store.GetLatestBeforeIndex(5, []byte("key1")).IsEstimate()) + require.Equal(t, []byte("value4"), store.GetLatestBeforeIndex(7, []byte("key1")).Value()) + + // Test Has + require.True(t, store.Has(2, []byte("key1"))) + require.False(t, store.Has(0, []byte("key1"))) + require.False(t, store.Has(5, []byte("key4"))) +} + +func TestMultiVersionStoreHasLaterValue(t *testing.T) { + store := multiversion.NewMultiVersionStore(nil) + + store.SetWriteset(5, 1, map[string][]byte{ + "key1": []byte("value2"), + }) + + require.Nil(t, store.GetLatestBeforeIndex(4, []byte("key1"))) + require.Equal(t, []byte("value2"), store.GetLatestBeforeIndex(6, []byte("key1")).Value()) +} + +func TestMultiVersionStoreKeyDNE(t *testing.T) { + store := multiversion.NewMultiVersionStore(nil) + + require.Nil(t, store.GetLatest([]byte("key1"))) + require.Nil(t, store.GetLatestBeforeIndex(0, []byte("key1"))) + require.False(t, store.Has(0, []byte("key1"))) +} + +func TestMultiVersionStoreWriteToParent(t *testing.T) { + // initialize cachekv store + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + + mvs.SetWriteset(1, 1, map[string][]byte{ + "key1": []byte("value1"), + "key3": nil, + "key4": nil, + }) + mvs.SetWriteset(2, 1, map[string][]byte{ + "key1": []byte("value2"), + }) + mvs.SetWriteset(3, 1, map[string][]byte{ + "key2": []byte("value3"), + }) + + mvs.WriteLatestToStore() + + // assert state in parent store + require.Equal(t, []byte("value2"), parentKVStore.Get([]byte("key1"))) + require.Equal(t, []byte("value3"), parentKVStore.Get([]byte("key2"))) + require.False(t, parentKVStore.Has([]byte("key3"))) + require.False(t, parentKVStore.Has([]byte("key4"))) + + // verify no-op if mvs contains ESTIMATE + mvs.SetEstimatedWriteset(1, 2, map[string][]byte{ + "key1": []byte("value1"), + "key3": nil, + 
"key4": nil, + "key5": nil, + }) + mvs.WriteLatestToStore() + require.False(t, parentKVStore.Has([]byte("key5"))) +} + +func TestMultiVersionStoreWritesetSetAndInvalidate(t *testing.T) { + mvs := multiversion.NewMultiVersionStore(nil) + + writeset := make(map[string][]byte) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + + mvs.SetWriteset(1, 2, writeset) + require.Equal(t, []byte("value1"), mvs.GetLatest([]byte("key1")).Value()) + require.Equal(t, []byte("value2"), mvs.GetLatest([]byte("key2")).Value()) + require.True(t, mvs.GetLatest([]byte("key3")).IsDeleted()) + + writeset2 := make(map[string][]byte) + writeset2["key1"] = []byte("value3") + + mvs.SetWriteset(2, 1, writeset2) + require.Equal(t, []byte("value3"), mvs.GetLatest([]byte("key1")).Value()) + + // invalidate writeset1 + mvs.InvalidateWriteset(1, 2) + + // verify estimates + require.True(t, mvs.GetLatestBeforeIndex(2, []byte("key1")).IsEstimate()) + require.True(t, mvs.GetLatestBeforeIndex(2, []byte("key2")).IsEstimate()) + require.True(t, mvs.GetLatestBeforeIndex(2, []byte("key3")).IsEstimate()) + + // third writeset + writeset3 := make(map[string][]byte) + writeset3["key4"] = []byte("foo") + writeset3["key5"] = nil + + // write the writeset directly as estimate + mvs.SetEstimatedWriteset(3, 1, writeset3) + + require.True(t, mvs.GetLatest([]byte("key4")).IsEstimate()) + require.True(t, mvs.GetLatest([]byte("key5")).IsEstimate()) + + // try replacing writeset1 to verify old keys removed + writeset1_b := make(map[string][]byte) + writeset1_b["key1"] = []byte("value4") + + mvs.SetWriteset(1, 2, writeset1_b) + require.Equal(t, []byte("value4"), mvs.GetLatestBeforeIndex(2, []byte("key1")).Value()) + require.Nil(t, mvs.GetLatestBeforeIndex(2, []byte("key2"))) + // verify that GetLatest for key3 returns nil - because of removal from writeset + require.Nil(t, mvs.GetLatest([]byte("key3"))) + + // verify output for GetAllWritesetKeys + writesetKeys := mvs.GetAllWritesetKeys() + // we have 3 writesets + require.Equal(t, 3, len(writesetKeys)) + require.Equal(t, []string{"key1"}, writesetKeys[1]) + require.Equal(t, []string{"key1"}, writesetKeys[2]) + require.Equal(t, []string{"key4", "key5"}, writesetKeys[3]) + +} + +func TestMultiVersionStoreValidateState(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + readset := make(multiversion.ReadSet) + readset["key1"] = []byte("value1") + readset["key2"] = []byte("value2") + readset["key3"] = nil + readset["key4"] = []byte("value4") + readset["key5"] = []byte("value5") + mvs.SetReadset(5, readset) + + // assert no readset is valid + valid, conflicts := mvs.ValidateTransactionState(4) + require.True(t, valid) + require.Empty(t, conflicts) + + // assert readset index 5 is valid + valid, conflicts = mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) + + // introduce conflict + mvs.SetWriteset(2, 1, map[string][]byte{ + "key3": []byte("value6"), + }) + + // expect failure with empty conflicts + valid, conflicts = 
mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Empty(t, conflicts) + + // add a conflict due to deletion + mvs.SetWriteset(3, 1, map[string][]byte{ + "key1": nil, + }) + + // expect failure with empty conflicts + valid, conflicts = mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Empty(t, conflicts) + + // add a conflict due to estimate + mvs.SetEstimatedWriteset(4, 1, map[string][]byte{ + "key2": []byte("test"), + }) + + // expect index 4 to be returned + valid, conflicts = mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Equal(t, []int{4}, conflicts) +} + +func TestMultiVersionStoreParentValidationMismatch(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + readset := make(multiversion.ReadSet) + readset["key1"] = []byte("value1") + readset["key2"] = []byte("value2") + readset["key3"] = nil + readset["key4"] = []byte("value4") + readset["key5"] = []byte("value5") + mvs.SetReadset(5, readset) + + // assert no readset is valid + valid, conflicts := mvs.ValidateTransactionState(4) + require.True(t, valid) + require.Empty(t, conflicts) + + // assert readset index 5 is valid + valid, conflicts = mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) + + // overwrite tx writeset for tx1 - no longer writes key1 + writeset2 := make(multiversion.WriteSet) + writeset2["key2"] = []byte("value2") + writeset2["key3"] = nil + mvs.SetWriteset(1, 3, writeset2) + + // assert readset index 5 is invalid - because of mismatch with parent store + valid, conflicts = mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSValidationWithOnlyEstimate(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + readset := make(multiversion.ReadSet) + readset["key1"] = []byte("value1") + readset["key2"] = []byte("value2") + readset["key3"] = nil + readset["key4"] = []byte("value4") + readset["key5"] = []byte("value5") + mvs.SetReadset(5, readset) + + // add a conflict due to estimate + mvs.SetEstimatedWriteset(4, 1, map[string][]byte{ + "key2": []byte("test"), + }) + + valid, conflicts := mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Equal(t, []int{4}, conflicts) + +} + +func TestMVSIteratorValidation(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) 
+ parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + // test basic iteration + iter := vis.ReverseIterator([]byte("key1"), []byte("key6")) + for ; iter.Valid(); iter.Next() { + // read value + iter.Value() + } + iter.Close() + vis.WriteToMultiVersionStore() + + // should be valid + valid, conflicts := mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSIteratorValidationWithEstimate(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + iter := vis.Iterator([]byte("key1"), []byte("key6")) + for ; iter.Valid(); iter.Next() { + // read value + iter.Value() + } + iter.Close() + vis.WriteToMultiVersionStore() + + writeset2 := make(multiversion.WriteSet) + writeset2["key2"] = []byte("value2") + mvs.SetEstimatedWriteset(2, 2, writeset2) + + // should be invalid + valid, conflicts := mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Equal(t, []int{2}, conflicts) +} + +func TestMVSIteratorValidationWithKeySwitch(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + iter := vis.Iterator([]byte("key1"), []byte("key6")) + for ; iter.Valid(); iter.Next() { + // read value + iter.Value() + } + iter.Close() + vis.WriteToMultiVersionStore() + + // deletion of 2 and introduction of 3 + writeset2 := make(multiversion.WriteSet) + writeset2["key2"] = nil + writeset2["key3"] = []byte("valueX") + mvs.SetWriteset(2, 2, writeset2) + + // should be invalid + valid, conflicts := mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSIteratorValidationWithKeyAdded(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + 
mvs.SetWriteset(1, 2, writeset) + + iter := vis.Iterator([]byte("key1"), []byte("key7")) + for ; iter.Valid(); iter.Next() { + // read value + iter.Value() + } + iter.Close() + vis.WriteToMultiVersionStore() + + // addition of key6 + writeset2 := make(multiversion.WriteSet) + writeset2["key6"] = []byte("value6") + mvs.SetWriteset(2, 2, writeset2) + + // should be invalid + valid, conflicts := mvs.ValidateTransactionState(5) + require.False(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSIteratorValidationWithWritesetValues(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + // set a key BEFORE iteration occurred + vis.Set([]byte("key6"), []byte("value6")) + + iter := vis.Iterator([]byte("key1"), []byte("key7")) + for ; iter.Valid(); iter.Next() { + } + iter.Close() + vis.WriteToMultiVersionStore() + + // should be valid + valid, conflicts := mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSIteratorValidationWithWritesetValuesSetAfterIteration(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + readset := make(multiversion.ReadSet) + readset["key1"] = []byte("value1") + readset["key2"] = []byte("value2") + readset["key3"] = nil + readset["key4"] = []byte("value4") + readset["key5"] = []byte("value5") + mvs.SetReadset(5, readset) + + // no key6 because the iteration was performed BEFORE the write + iter := vis.Iterator([]byte("key1"), []byte("key7")) + for ; iter.Valid(); iter.Next() { + } + iter.Close() + + // write key 6 AFTER iterator went + vis.Set([]byte("key6"), []byte("value6")) + vis.WriteToMultiVersionStore() + + // should be valid + valid, conflicts := mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSIteratorValidationReverse(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + 
readset := make(multiversion.ReadSet) + readset["key1"] = []byte("value1") + readset["key2"] = []byte("value2") + readset["key3"] = nil + readset["key4"] = []byte("value4") + readset["key5"] = []byte("value5") + mvs.SetReadset(5, readset) + + // set a key BEFORE iteration occurred + vis.Set([]byte("key6"), []byte("value6")) + + iter := vis.ReverseIterator([]byte("key1"), []byte("key7")) + for ; iter.Valid(); iter.Next() { + } + iter.Close() + vis.WriteToMultiVersionStore() + + // should be valid + valid, conflicts := mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) +} + +func TestMVSIteratorValidationEarlyStop(t *testing.T) { + parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()} + mvs := multiversion.NewMultiVersionStore(parentKVStore) + vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort)) + + parentKVStore.Set([]byte("key2"), []byte("value0")) + parentKVStore.Set([]byte("key3"), []byte("value3")) + parentKVStore.Set([]byte("key4"), []byte("value4")) + parentKVStore.Set([]byte("key5"), []byte("value5")) + + writeset := make(multiversion.WriteSet) + writeset["key1"] = []byte("value1") + writeset["key2"] = []byte("value2") + writeset["key3"] = nil + mvs.SetWriteset(1, 2, writeset) + + readset := make(multiversion.ReadSet) + readset["key1"] = []byte("value1") + readset["key2"] = []byte("value2") + readset["key3"] = nil + readset["key4"] = []byte("value4") + mvs.SetReadset(5, readset) + + iter := vis.Iterator([]byte("key1"), []byte("key7")) + for ; iter.Valid(); iter.Next() { + // read the value and see if we want to break + if bytes.Equal(iter.Key(), []byte("key4")) { + break + } + } + iter.Close() + vis.WriteToMultiVersionStore() + + // removal of key5 - but irrelevant because of early stop + writeset2 := make(multiversion.WriteSet) + writeset2["key5"] = nil + mvs.SetWriteset(2, 2, writeset2) + + // should be valid + valid, conflicts := mvs.ValidateTransactionState(5) + require.True(t, valid) + require.Empty(t, conflicts) +} + +// TODO: what about early stop with a new key added in the range? - especially if its the last key that we stopped at? 
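A sketch of the first case the TODO above raises (hypothetical test, not part of this diff): a key written into the range before the early-stop key should fail validation, because the replayed merge iterator encounters it ahead of the recorded keys.

func TestMVSIteratorValidationEarlyStopWithKeyAddedBeforeStop(t *testing.T) {
	parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()}
	mvs := multiversion.NewMultiVersionStore(parentKVStore)
	vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort))

	parentKVStore.Set([]byte("key2"), []byte("value2"))
	parentKVStore.Set([]byte("key4"), []byte("value4"))

	iter := vis.Iterator([]byte("key1"), []byte("key7"))
	for ; iter.Valid(); iter.Next() {
		if bytes.Equal(iter.Key(), []byte("key4")) {
			break // early stop at key4
		}
	}
	iter.Close()
	vis.WriteToMultiVersionStore()

	// key3 lands inside the range, BEFORE the recorded stop key key4
	mvs.SetWriteset(2, 2, map[string][]byte{"key3": []byte("value3")})

	// should be invalid: the replay sees key3 where key2/key4 were expected
	valid, _ := mvs.ValidateTransactionState(5)
	require.False(t, valid)
}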
+func TestMVSIteratorValidationEarlyStopAtEndOfRange(t *testing.T) {
+	parentKVStore := dbadapter.Store{DB: dbm.NewMemDB()}
+	mvs := multiversion.NewMultiVersionStore(parentKVStore)
+	vis := multiversion.NewVersionIndexedStore(parentKVStore, mvs, 5, 1, make(chan occ.Abort))
+
+	parentKVStore.Set([]byte("key2"), []byte("value0"))
+	parentKVStore.Set([]byte("key3"), []byte("value3"))
+	parentKVStore.Set([]byte("key4"), []byte("value4"))
+	parentKVStore.Set([]byte("key5"), []byte("value5"))
+
+	writeset := make(multiversion.WriteSet)
+	writeset["key1"] = []byte("value1")
+	writeset["key2"] = []byte("value2")
+	writeset["key3"] = nil
+	mvs.SetWriteset(1, 2, writeset)
+
+	// test basic iteration
+	iter := vis.Iterator([]byte("key1"), []byte("key7"))
+	for ; iter.Valid(); iter.Next() {
+		// read the value and see if we want to break
+		if bytes.Equal(iter.Key(), []byte("key5")) {
+			break
+		}
+	}
+	iter.Close()
+	vis.WriteToMultiVersionStore()
+
+	// add key6
+	writeset2 := make(multiversion.WriteSet)
+	writeset2["key6"] = []byte("value6")
+	mvs.SetWriteset(2, 2, writeset2)
+
+	// should be valid
+	valid, conflicts := mvs.ValidateTransactionState(5)
+	require.True(t, valid)
+	require.Empty(t, conflicts)
+}
diff --git a/store/multiversion/trackediterator.go b/store/multiversion/trackediterator.go
new file mode 100644
index 000000000..24a1d7a16
--- /dev/null
+++ b/store/multiversion/trackediterator.go
@@ -0,0 +1,60 @@
+package multiversion
+
+import "github.com/cosmos/cosmos-sdk/store/types"
+
+// trackedIterator is a wrapper around an existing iterator that tracks iteration progress and records which keys are iterated.
+type trackedIterator struct {
+	types.Iterator
+
+	iterateset iterationTracker
+	ReadsetHandler
+	IterateSetHandler
+}
+
+// TODO: test
+
+func NewTrackedIterator(iter types.Iterator, iterationTracker iterationTracker, iterateSetHandler IterateSetHandler, readSetHandler ReadsetHandler) *trackedIterator {
+	return &trackedIterator{
+		Iterator:          iter,
+		iterateset:        iterationTracker,
+		IterateSetHandler: iterateSetHandler,
+		ReadsetHandler:    readSetHandler,
+	}
+}
+
+// Close first updates the iterateset from the iterator's final position, then calls iterator.Close()
+func (ti *trackedIterator) Close() error {
+	// TODO: if there are more keys to the iterator, then we consider it early stopped? 
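+	// (a still-valid iterator here means the consumer stopped before exhausting the
+	// range; recording the stop key lets validation replay only up to that point)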
+	if ti.Iterator.Valid() {
+		// TODO: test whether reaching end of iteration range means valid is true or false
+		ti.iterateset.SetEarlyStopKey(ti.Iterator.Key())
+	}
+	// Update iterate set
+	ti.IterateSetHandler.UpdateIterateSet(ti.iterateset)
+	return ti.Iterator.Close()
+}
+
+// Key calls the iterator.Key() and adds the key to the iterateset, then returns the key from the iterator
+func (ti *trackedIterator) Key() []byte {
+	key := ti.Iterator.Key()
+	// add key to the tracker
+	ti.iterateset.AddKey(key)
+	return key
+}
+
+// Value adds the current key to the iterateset, then returns the value from the iterator
+func (ti *trackedIterator) Value() []byte {
+	key := ti.Iterator.Key()
+	val := ti.Iterator.Value()
+	// add key to the tracker
+	ti.iterateset.AddKey(key)
+	return val
+}
+
+func (ti *trackedIterator) Next() {
+	// add current key to the tracker
+	key := ti.Iterator.Key()
+	ti.iterateset.AddKey(key)
+	// call next
+	ti.Iterator.Next()
+}
diff --git a/store/prefix/store.go b/store/prefix/store.go
index f6c29fd15..fd1f2672b 100644
--- a/store/prefix/store.go
+++ b/store/prefix/store.go
@@ -90,6 +90,18 @@ func (s Store) Delete(key []byte) {
 	s.parent.Delete(s.key(key))
 }
 
+func (s Store) DeleteAll(start, end []byte) error {
+	newstart := cloneAppend(s.prefix, start)
+
+	var newend []byte
+	if end == nil {
+		newend = cpIncr(s.prefix)
+	} else {
+		newend = cloneAppend(s.prefix, end)
+	}
+	return s.parent.DeleteAll(newstart, newend)
+}
+
 // Implements KVStore
 // Check https://github.com/tendermint/tendermint/blob/master/libs/db/prefix_db.go#L106
 func (s Store) Iterator(start, end []byte) types.Iterator {
@@ -124,6 +136,10 @@ func (s Store) ReverseIterator(start, end []byte) types.Iterator {
 	return newPrefixIterator(s.prefix, start, end, iter)
 }
 
+func (s Store) VersionExists(version int64) bool {
+	return s.parent.VersionExists(version)
+}
+
 var _ types.Iterator = (*prefixIterator)(nil)
 
 type prefixIterator struct {
diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go
index a8488bfea..77f7a728c 100644
--- a/store/rootmulti/store.go
+++ b/store/rootmulti/store.go
@@ -1201,3 +1201,15 @@ func flushPruningHeights(batch dbm.Batch, pruneHeights []int64) {
 func (rs *Store) Close() error {
 	return rs.db.Close()
 }
+
+func (rs *Store) SetKVStores(handler func(key types.StoreKey, s types.KVStore) types.CacheWrap) types.MultiStore {
+	panic("SetKVStores is not implemented for rootmulti")
+}
+
+func (rs *Store) StoreKeys() []types.StoreKey {
+	res := make([]types.StoreKey, 0, len(rs.keysByName))
+	for _, sk := range rs.keysByName {
+		res = append(res, sk)
+	}
+	return res
+}
diff --git a/store/tracekv/store.go b/store/tracekv/store.go
index 2798d90ec..81fe2034c 100644
--- a/store/tracekv/store.go
+++ b/store/tracekv/store.go
@@ -182,6 +182,14 @@ func (tkv *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
 	panic("cannot CacheWrapWithListeners a TraceKVStore")
 }
 
+func (tkv *Store) VersionExists(version int64) bool {
+	return tkv.parent.VersionExists(version)
+}
+
+func (tkv *Store) DeleteAll(start, end []byte) error {
+	return tkv.parent.DeleteAll(start, end)
+}
+
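The prefix store's DeleteAll above computes the end of the key range with cpIncr when no explicit end is given; the technique is the usual increment-the-last-non-0xff-byte trick. A self-contained sketch under that assumption (illustrative; cpIncr itself is an existing SDK helper):

package main

import "fmt"

// prefixEnd returns the smallest byte slice strictly greater than every key
// that has bz as a prefix; nil means "no upper bound" (the prefix is all 0xff).
func prefixEnd(bz []byte) []byte {
	end := append([]byte(nil), bz...) // copy so the caller's slice is untouched
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] != 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return nil // all bytes were 0xff: iterate to the end of the keyspace
}

func main() {
	fmt.Printf("%x\n", prefixEnd([]byte{0x61, 0x62})) // 6163: "ab" -> "ac"
	fmt.Printf("%x\n", prefixEnd([]byte{0x61, 0xff})) // 62: "a\xff" -> "b"
	fmt.Println(prefixEnd([]byte{0xff, 0xff}))        // nil: unbounded
}

 // writeOperation writes a KVStore operation to the underlying io.Writer as
 // JSON-encoded data where the key/value pair is base64 encoded.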
func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) { diff --git a/store/types/cache.go b/store/types/cache.go index 53f45d6b3..b00335a76 100644 --- a/store/types/cache.go +++ b/store/types/cache.go @@ -47,7 +47,7 @@ type BoundedCache struct { CacheBackend limit int - mu *sync.Mutex + mu *sync.Mutex metricName []string } @@ -88,7 +88,7 @@ func (c *BoundedCache) emitKeysEvictedMetrics(keysToEvict int) { func (c *BoundedCache) Set(key string, val *CValue) { c.mu.Lock() defer c.mu.Unlock() - defer c.emitCacheSizeMetric() + // defer c.emitCacheSizeMetric() if c.Len() >= c.limit { numEntries := c.Len() @@ -112,7 +112,7 @@ func (c *BoundedCache) Set(key string, val *CValue) { func (c *BoundedCache) Delete(key string) { c.mu.Lock() defer c.mu.Unlock() - defer c.emitCacheSizeMetric() + // defer c.emitCacheSizeMetric() c.CacheBackend.Delete(key) } @@ -120,7 +120,7 @@ func (c *BoundedCache) Delete(key string) { func (c *BoundedCache) DeleteAll() { c.mu.Lock() defer c.mu.Unlock() - defer c.emitCacheSizeMetric() + // defer c.emitCacheSizeMetric() c.CacheBackend.Range(func(key string, _ *CValue) bool { c.CacheBackend.Delete(key) diff --git a/store/types/store.go b/store/types/store.go index 19115ab72..e1f4b0a97 100644 --- a/store/types/store.go +++ b/store/types/store.go @@ -145,6 +145,12 @@ type MultiStore interface { // Resets the tracked event list ResetEvents() + + // SetKVStores is a generalized wrapper method + SetKVStores(handler func(key StoreKey, s KVStore) CacheWrap) MultiStore + + // StoreKeys returns a list of store keys + StoreKeys() []StoreKey } // From MultiStore.CacheMultiStore().... @@ -247,6 +253,10 @@ type KVStore interface { ReverseIterator(start, end []byte) Iterator GetWorkingHash() ([]byte, error) + + VersionExists(version int64) bool + + DeleteAll(start, end []byte) error } // Iterator is an alias db's Iterator for convenience. diff --git a/storev2/commitment/store.go b/storev2/commitment/store.go index a51073875..f48362873 100644 --- a/storev2/commitment/store.go +++ b/storev2/commitment/store.go @@ -177,3 +177,21 @@ func (st *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) { return res } + +func (st *Store) VersionExists(version int64) bool { + // one version per SC tree + return version == st.tree.Version() +} + +func (st *Store) DeleteAll(start, end []byte) error { + iter := st.Iterator(start, end) + keys := [][]byte{} + for ; iter.Valid(); iter.Next() { + keys = append(keys, iter.Key()) + } + iter.Close() + for _, key := range keys { + st.Delete(key) + } + return nil +} diff --git a/storev2/rootmulti/store.go b/storev2/rootmulti/store.go index be7f4f2af..f7146f09b 100644 --- a/storev2/rootmulti/store.go +++ b/storev2/rootmulti/store.go @@ -91,7 +91,7 @@ func NewStore( // Commit implements interface Committer, called by ABCI Commit func (rs *Store) Commit(bumpVersion bool) types.CommitID { if !bumpVersion { - return rs.lastCommitInfo.CommitID() + panic("Commit should always bump version in root multistore") } if err := rs.flush(); err != nil { panic(err) @@ -141,7 +141,7 @@ func (rs *Store) StateStoreCommit() { // Flush all the pending changesets to commit store. 
 func (rs *Store) flush() error {
 	var changeSets []*proto.NamedChangeSet
-	currentVersion := rs.lastCommitInfo.Version
+	currentVersion := rs.lastCommitInfo.Version + 1
 	for key := range rs.ckvStores {
 		// it'll unwrap the inter-block cache
 		store := rs.GetCommitKVStore(key)
@@ -206,8 +206,8 @@ func (rs *Store) GetStoreType() types.StoreType {
 }
 
 // Implements interface CacheWrapper
-func (rs *Store) CacheWrap(storeKey types.StoreKey) types.CacheWrap {
-	return rs.CacheMultiStore().CacheWrap(storeKey)
+func (rs *Store) CacheWrap(_ types.StoreKey) types.CacheWrap {
+	return rs.CacheMultiStore().(types.CacheWrap)
 }
 
 // Implements interface CacheWrapper
@@ -356,6 +356,12 @@ func (rs *Store) LoadVersionAndUpgrade(version int64, upgrades *types.StoreUpgra
 	if err := rs.scStore.Initialize(initialStores); err != nil {
 		return err
 	}
+	if version > 0 {
+		_, err := rs.scStore.LoadVersion(version, false)
+		if err != nil {
+			return err
+		}
+	}
 
 	var treeUpgrades []*proto.TreeNameUpgrade
 	for _, key := range storesKeys {
@@ -488,6 +494,7 @@ func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery {
 		return sdkerrors.QueryResult(err)
 	}
 	var store types.Queryable
+	var commitInfo *types.CommitInfo
 
 	if !req.Prove && version < rs.lastCommitInfo.Version && rs.ssStore != nil {
 		// Serve abci query from ss store if no proofs needed
@@ -500,9 +507,13 @@ func (rs *Store) Query(req abci.RequestQuery) abci.ResponseQuery {
 			return sdkerrors.QueryResult(err)
 		}
 		store = types.Queryable(commitment.NewStore(scStore.GetTreeByName(storeName), rs.logger))
+		commitInfo = convertCommitInfo(scStore.LastCommitInfo())
+		commitInfo = amendCommitInfo(commitInfo, rs.storesParams)
 	} else {
 		// Serve directly from latest sc store
 		store = types.Queryable(commitment.NewStore(rs.scStore.GetTreeByName(storeName), rs.logger))
+		commitInfo = convertCommitInfo(rs.scStore.LastCommitInfo())
+		commitInfo = amendCommitInfo(commitInfo, rs.storesParams)
 	}
 
 	// trim the path and execute the query
@@ -511,14 +522,13 @@
 	if !req.Prove || !rootmulti.RequireProof(subPath) {
 		return res
+	} else if commitInfo != nil {
+		// Restore origin path and append proof op.
+		res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName))
 	}
 
 	if res.ProofOps == nil || len(res.ProofOps.Ops) == 0 {
 		return sdkerrors.QueryResult(errors.Wrap(sdkerrors.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned"))
 	}
-	commitInfo := convertCommitInfo(rs.scStore.LastCommitInfo())
-	commitInfo = amendCommitInfo(commitInfo, rs.storesParams)
-	// Restore origin path and append proof op.
-	res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName))
 	return res
 }
@@ -780,3 +790,17 @@ func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error {
 
 	return nil
 }
+
+// SetKVStores implements types.CommitMultiStore.
+func (*Store) SetKVStores(handler func(key types.StoreKey, s types.KVStore) types.CacheWrap) types.MultiStore {
+	panic("unimplemented")
+}
+
+// StoreKeys implements types.CommitMultiStore.
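
Both StoreKeys helpers in this change (the rootmulti one earlier and the storev2 one just below) preallocate the result slice with a capacity rather than a length before appending. The distinction matters: make([]T, n) followed by append would leave n zero-valued entries at the front and double the slice. A small self-contained demonstration:

package main

import "fmt"

func main() {
	src := map[string]string{"bank": "kv/bank", "auth": "kv/auth"}

	// make with a length: two zero-value entries already occupy the slice,
	// so append grows it to four elements, the first two of them empty.
	wrong := make([]string, len(src))
	// make with a capacity: length stays zero and append fills in place.
	right := make([]string, 0, len(src))
	for _, v := range src {
		wrong = append(wrong, v)
		right = append(right, v)
	}
	fmt.Println(len(wrong), len(right)) // 4 2
}
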
+func (s *Store) StoreKeys() []types.StoreKey {
+	res := make([]types.StoreKey, 0, len(s.storeKeys))
+	for _, sk := range s.storeKeys {
+		res = append(res, sk)
+	}
+	return res
+}
diff --git a/storev2/state/store.go b/storev2/state/store.go
index 72e4b0ce9..b9c9c7599 100644
--- a/storev2/state/store.go
+++ b/storev2/state/store.go
@@ -1,10 +1,11 @@
 package state
 
 import (
-	"cosmossdk.io/errors"
 	"fmt"
 	"io"
 
+	"cosmossdk.io/errors"
+
 	"github.com/cosmos/cosmos-sdk/store/cachekv"
 	"github.com/cosmos/cosmos-sdk/store/listenkv"
 	"github.com/cosmos/cosmos-sdk/store/tracekv"
@@ -97,6 +98,7 @@ func (st *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
 	if req.Height > 0 && req.Height > st.version {
 		return sdkerrors.QueryResult(errors.Wrap(sdkerrors.ErrInvalidHeight, "invalid height"))
 	}
+	res.Height = st.version
 	switch req.Path {
 	case "/key": // get by key
 		res.Key = req.Data // data holds the key bytes
@@ -105,7 +107,6 @@ func (st *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
 		pairs := kv.Pairs{
 			Pairs: make([]kv.Pair, 0),
 		}
-
 		subspace := req.Data
 		res.Key = subspace
 		iterator := types.KVStorePrefixIterator(st, subspace)
@@ -125,3 +126,24 @@ func (st *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
 
 	return res
 }
+
+func (st *Store) VersionExists(version int64) bool {
+	earliest, err := st.store.GetEarliestVersion()
+	if err != nil {
+		panic(err)
+	}
+	return version >= earliest
+}
+
+func (st *Store) DeleteAll(start, end []byte) error {
+	iter := st.Iterator(start, end)
+	keys := [][]byte{}
+	for ; iter.Valid(); iter.Next() {
+		keys = append(keys, iter.Key())
+	}
+	iter.Close()
+	for _, key := range keys {
+		st.Delete(key)
+	}
+	return nil
+}
diff --git a/tasks/scheduler.go b/tasks/scheduler.go
new file mode 100644
index 000000000..38ef391cd
--- /dev/null
+++ b/tasks/scheduler.go
@@ -0,0 +1,519 @@
+package tasks
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"sort"
+	"sync"
+
+	"github.com/cosmos/cosmos-sdk/store/multiversion"
+	store "github.com/cosmos/cosmos-sdk/store/types"
+	"github.com/cosmos/cosmos-sdk/telemetry"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/occ"
+	"github.com/cosmos/cosmos-sdk/utils/tracing"
+	"github.com/tendermint/tendermint/abci/types"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type status string
+
+const (
+	// statusPending tasks are ready for execution
+	// all executing tasks are in pending state
+	statusPending status = "pending"
+	// statusExecuted tasks are ready for validation
+	// these tasks did not abort during execution
+	statusExecuted status = "executed"
+	// statusAborted means the task has been aborted
+	// these tasks transition to pending upon next execution
+	statusAborted status = "aborted"
+	// statusValidated means the task has been validated
+	// tasks in this status can be reset if an earlier task fails validation
+	statusValidated status = "validated"
+	// statusWaiting tasks are waiting for another tx to complete
+	statusWaiting status = "waiting"
+)
+
+type deliverTxTask struct {
+	Ctx     sdk.Context
+	AbortCh chan occ.Abort
+
+	mx            sync.RWMutex
+	Status        status
+	Dependencies  []int
+	Abort         *occ.Abort
+	Index         int
+	Incarnation   int
+	Request       types.RequestDeliverTx
+	SdkTx         sdk.Tx
+	Checksum      [32]byte
+	AbsoluteIndex int
+	Response      *types.ResponseDeliverTx
+	VersionStores map[sdk.StoreKey]*multiversion.VersionIndexedStore
+	ValidateCh    chan status
+}
+
+func (dt *deliverTxTask) IsStatus(s status) bool {
+	dt.mx.RLock()
+	defer dt.mx.RUnlock()
+
return dt.Status == s +} + +func (dt *deliverTxTask) SetStatus(s status) { + dt.mx.Lock() + defer dt.mx.Unlock() + dt.Status = s +} + +func (dt *deliverTxTask) Reset() { + dt.SetStatus(statusPending) + dt.Response = nil + dt.Abort = nil + dt.AbortCh = nil + dt.Dependencies = nil + dt.VersionStores = nil +} + +func (dt *deliverTxTask) Increment() { + dt.Incarnation++ + dt.ValidateCh = make(chan status, 1) +} + +// Scheduler processes tasks concurrently +type Scheduler interface { + ProcessAll(ctx sdk.Context, reqs []*sdk.DeliverTxEntry) ([]types.ResponseDeliverTx, error) +} + +type scheduler struct { + deliverTx func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) + workers int + multiVersionStores map[sdk.StoreKey]multiversion.MultiVersionStore + tracingInfo *tracing.Info + allTasks []*deliverTxTask + executeCh chan func() + validateCh chan func() + metrics *schedulerMetrics +} + +// NewScheduler creates a new scheduler +func NewScheduler(workers int, tracingInfo *tracing.Info, deliverTxFunc func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx)) Scheduler { + return &scheduler{ + workers: workers, + deliverTx: deliverTxFunc, + tracingInfo: tracingInfo, + metrics: &schedulerMetrics{}, + } +} + +func (s *scheduler) invalidateTask(task *deliverTxTask) { + for _, mv := range s.multiVersionStores { + mv.InvalidateWriteset(task.AbsoluteIndex, task.Incarnation) + mv.ClearReadset(task.AbsoluteIndex) + mv.ClearIterateset(task.AbsoluteIndex) + } +} + +func start(ctx context.Context, ch chan func(), workers int) { + for i := 0; i < workers; i++ { + go func() { + for { + select { + case <-ctx.Done(): + return + case work := <-ch: + work() + } + } + }() + } +} + +func (s *scheduler) DoValidate(work func()) { + s.validateCh <- work +} + +func (s *scheduler) DoExecute(work func()) { + s.executeCh <- work +} + +func (s *scheduler) findConflicts(task *deliverTxTask) (bool, []int) { + var conflicts []int + uniq := make(map[int]struct{}) + valid := true + for _, mv := range s.multiVersionStores { + ok, mvConflicts := mv.ValidateTransactionState(task.AbsoluteIndex) + for _, c := range mvConflicts { + if _, ok := uniq[c]; !ok { + conflicts = append(conflicts, c) + uniq[c] = struct{}{} + } + } + // any non-ok value makes valid false + valid = ok && valid + } + sort.Ints(conflicts) + return valid, conflicts +} + +func toTasks(reqs []*sdk.DeliverTxEntry) []*deliverTxTask { + res := make([]*deliverTxTask, 0, len(reqs)) + for idx, r := range reqs { + res = append(res, &deliverTxTask{ + Request: r.Request, + SdkTx: r.SdkTx, + Checksum: r.Checksum, + AbsoluteIndex: r.AbsoluteIndex, + Index: idx, + Status: statusPending, + ValidateCh: make(chan status, 1), + }) + } + return res +} + +func (s *scheduler) collectResponses(tasks []*deliverTxTask) []types.ResponseDeliverTx { + res := make([]types.ResponseDeliverTx, 0, len(tasks)) + var maxIncarnation int + for _, t := range tasks { + if t.Incarnation > maxIncarnation { + maxIncarnation = t.Incarnation + } + res = append(res, *t.Response) + } + s.metrics.maxIncarnation = maxIncarnation + return res +} + +func (s *scheduler) tryInitMultiVersionStore(ctx sdk.Context) { + if s.multiVersionStores != nil { + return + } + mvs := make(map[sdk.StoreKey]multiversion.MultiVersionStore) + keys := ctx.MultiStore().StoreKeys() + for _, sk := range keys { + mvs[sk] = multiversion.NewMultiVersionStore(ctx.MultiStore().GetKVStore(sk)) + } + s.multiVersionStores = mvs +} + +func 
indexesValidated(tasks []*deliverTxTask, idx []int) bool {
+	for _, i := range idx {
+		if !tasks[i].IsStatus(statusValidated) {
+			return false
+		}
+	}
+	return true
+}
+
+func allValidated(tasks []*deliverTxTask) bool {
+	for _, t := range tasks {
+		if !t.IsStatus(statusValidated) {
+			return false
+		}
+	}
+	return true
+}
+
+func (s *scheduler) PrefillEstimates(reqs []*sdk.DeliverTxEntry) {
+	// iterate over TXs, update estimated writesets where applicable
+	for i, req := range reqs {
+		mappedWritesets := req.EstimatedWritesets
+		// order shouldn't matter for storeKeys because each storeKey-partitioned MVS is independent
+		for storeKey, writeset := range mappedWritesets {
+			// we use `-1` to indicate a prefill incarnation
+			s.multiVersionStores[storeKey].SetEstimatedWriteset(i, -1, writeset)
+		}
+	}
+}
+
+// schedulerMetrics contains metrics for the scheduler
+type schedulerMetrics struct {
+	// maxIncarnation is the highest incarnation seen in this set
+	maxIncarnation int
+	// retries is the number of tx attempts beyond the first attempt
+	retries int
+}
+
+func (s *scheduler) emitMetrics() {
+	telemetry.IncrCounter(float32(s.metrics.retries), "scheduler", "retries")
+	telemetry.IncrCounter(float32(s.metrics.maxIncarnation), "scheduler", "incarnations")
+}
+
+func (s *scheduler) ProcessAll(ctx sdk.Context, reqs []*sdk.DeliverTxEntry) ([]types.ResponseDeliverTx, error) {
+	// initialize multi-version stores if they haven't been initialized yet
+	s.tryInitMultiVersionStore(ctx)
+	// prefill estimates
+	s.PrefillEstimates(reqs)
+	tasks := toTasks(reqs)
+	s.allTasks = tasks
+	s.executeCh = make(chan func(), len(tasks))
+	s.validateCh = make(chan func(), len(tasks))
+	defer s.emitMetrics()
+
+	// default to the number of tasks if workers is negative or 0 by this point
+	workers := s.workers
+	if s.workers < 1 {
+		workers = len(tasks)
+	}
+
+	workerCtx, cancel := context.WithCancel(ctx.Context())
+	defer cancel()
+
+	// execution tasks are limited by workers
+	start(workerCtx, s.executeCh, workers)
+
+	// validation uses one worker per task to avoid blocking on validation
+	start(workerCtx, s.validateCh, len(tasks))
+
+	toExecute := tasks
+	for !allValidated(tasks) {
+		var err error
+
+		// execute sets statuses of tasks to either executed or aborted
+		if len(toExecute) > 0 {
+			err = s.executeAll(ctx, toExecute)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// validate returns any tasks that should be re-executed
+		// note this processes ALL tasks, not just those recently executed
+		toExecute, err = s.validateAll(ctx, tasks)
+		if err != nil {
+			return nil, err
+		}
+		// these are retries, which apply to metrics
+		s.metrics.retries += len(toExecute)
+	}
+	for _, mv := range s.multiVersionStores {
+		mv.WriteLatestToStore()
+	}
+	return s.collectResponses(tasks), nil
+}
+
+func (s *scheduler) shouldRerun(task *deliverTxTask) bool {
+	switch task.Status {
+
+	case statusAborted, statusPending:
+		return true
+
+	// validated tasks can become unvalidated if an earlier re-run task now conflicts
+	case statusExecuted, statusValidated:
+		// With the current scheduler, we won't actually get to this step if a previous task has already been determined to be invalid,
+		// since we choose to fail fast and mark the subsequent tasks as invalid as well.
+		// TODO: in a future async scheduler that no longer exhaustively validates in order, we may need to carefully handle the `valid=true` with conflicts case
+		if valid, conflicts := s.findConflicts(task); !valid {
+			s.invalidateTask(task)
+
+			// if the conflicts are now validated, then rerun this task
+			if indexesValidated(s.allTasks, conflicts) {
+				return true
+			} else {
+				// otherwise, wait for completion
+				task.Dependencies = conflicts
+				task.SetStatus(statusWaiting)
+				return false
+			}
+		} else if len(conflicts) == 0 {
+			// mark as validated, which will avoid re-validating unless a lower-index re-validates
+			task.SetStatus(statusValidated)
+			return false
+		}
+		// conflicts and valid, so it'll validate next time
+		return false
+
+	case statusWaiting:
+		// if conflicts are done, then this task is ready to run again
+		return indexesValidated(s.allTasks, task.Dependencies)
+	}
+	panic("unexpected status: " + task.Status)
+}
+
+func (s *scheduler) validateTask(ctx sdk.Context, task *deliverTxTask) bool {
+	_, span := s.traceSpan(ctx, "SchedulerValidate", task)
+	defer span.End()
+
+	if s.shouldRerun(task) {
+		return false
+	}
+	return true
+}
+
+func (s *scheduler) findFirstNonValidated() (int, bool) {
+	for i, t := range s.allTasks {
+		if t.Status != statusValidated {
+			return i, true
+		}
+	}
+	return 0, false
+}
+
+func (s *scheduler) validateAll(ctx sdk.Context, tasks []*deliverTxTask) ([]*deliverTxTask, error) {
+	ctx, span := s.traceSpan(ctx, "SchedulerValidateAll", nil)
+	defer span.End()
+
+	var mx sync.Mutex
+	var res []*deliverTxTask
+
+	startIdx, anyLeft := s.findFirstNonValidated()
+
+	if !anyLeft {
+		return nil, nil
+	}
+
+	wg := &sync.WaitGroup{}
+	for i := startIdx; i < len(tasks); i++ {
+		wg.Add(1)
+		t := tasks[i]
+		s.DoValidate(func() {
+			defer wg.Done()
+			if !s.validateTask(ctx, t) {
+				mx.Lock()
+				defer mx.Unlock()
+				t.Reset()
+				t.Increment()
+				res = append(res, t)
+			}
+		})
+	}
+	wg.Wait()
+
+	return res, nil
+}
+
+// ExecuteAll executes all tasks concurrently
+func (s *scheduler) executeAll(ctx sdk.Context, tasks []*deliverTxTask) error {
+	ctx, span := s.traceSpan(ctx, "SchedulerExecuteAll", nil)
+	defer span.End()
+
+	// wg waits for every dispatched execution to complete
+	// executions run in the worker pool goroutines fed by DoExecute
+	wg := &sync.WaitGroup{}
+	wg.Add(len(tasks))
+
+	for _, task := range tasks {
+		t := task
+		s.DoExecute(func() {
+			s.prepareAndRunTask(wg, ctx, t)
+		})
+	}
+
+	wg.Wait()
+
+	return nil
+}
+
+func (s *scheduler) prepareAndRunTask(wg *sync.WaitGroup, ctx sdk.Context, task *deliverTxTask) {
+	eCtx, eSpan := s.traceSpan(ctx, "SchedulerExecute", task)
+	defer eSpan.End()
+
+	task.Ctx = eCtx
+	s.executeTask(task)
+	wg.Done()
+}
+
+func (s *scheduler) traceSpan(ctx sdk.Context, name string, task *deliverTxTask) (sdk.Context, trace.Span) {
+	spanCtx, span := s.tracingInfo.StartWithContext(name, ctx.TraceSpanContext())
+	if task != nil {
+		span.SetAttributes(attribute.String("txHash", fmt.Sprintf("%X", sha256.Sum256(task.Request.Tx))))
+		span.SetAttributes(attribute.Int("txIndex", task.Index))
+		span.SetAttributes(attribute.Int("absoluteIndex", task.AbsoluteIndex))
+		span.SetAttributes(attribute.Int("txIncarnation", task.Incarnation))
+	}
+	ctx = ctx.WithTraceSpanContext(spanCtx)
+	return ctx, span
+}
+
+// prepareTask initializes the context and version stores for a task
+func (s *scheduler) prepareTask(task *deliverTxTask) {
+	ctx := task.Ctx.WithTxIndex(task.AbsoluteIndex)
+
+	_, span := s.traceSpan(ctx, "SchedulerPrepare", task)
+ defer span.End() + + // initialize the context + abortCh := make(chan occ.Abort, len(s.multiVersionStores)) + + // if there are no stores, don't try to wrap, because there's nothing to wrap + if len(s.multiVersionStores) > 0 { + // non-blocking + cms := ctx.MultiStore().CacheMultiStore() + + // init version stores by store key + vs := make(map[store.StoreKey]*multiversion.VersionIndexedStore) + for storeKey, mvs := range s.multiVersionStores { + vs[storeKey] = mvs.VersionedIndexedStore(task.AbsoluteIndex, task.Incarnation, abortCh) + } + + // save off version store so we can ask it things later + task.VersionStores = vs + ms := cms.SetKVStores(func(k store.StoreKey, kvs sdk.KVStore) store.CacheWrap { + return vs[k] + }) + + ctx = ctx.WithMultiStore(ms) + } + + task.AbortCh = abortCh + task.Ctx = ctx +} + +func (s *scheduler) executeTask(task *deliverTxTask) { + dCtx, dSpan := s.traceSpan(task.Ctx, "SchedulerExecuteTask", task) + defer dSpan.End() + task.Ctx = dCtx + + s.prepareTask(task) + + // Channel to signal the completion of deliverTx + doneCh := make(chan types.ResponseDeliverTx) + + // Run deliverTx in a separate goroutine + go func() { + doneCh <- s.deliverTx(task.Ctx, task.Request, task.SdkTx, task.Checksum) + }() + + // Flag to mark if abort has happened + var abortOccurred bool + + var wg sync.WaitGroup + wg.Add(1) + + var abort *occ.Abort + // Drain the AbortCh in a non-blocking way + go func() { + defer wg.Done() + for abt := range task.AbortCh { + if !abortOccurred { + abortOccurred = true + abort = &abt + } + } + }() + + // Wait for deliverTx to complete + resp := <-doneCh + + close(task.AbortCh) + + wg.Wait() + + // If abort has occurred, return, else set the response and status + if abortOccurred { + task.SetStatus(statusAborted) + task.Abort = abort + return + } + + task.SetStatus(statusExecuted) + task.Response = &resp + + // write from version store to multiversion stores + for _, v := range task.VersionStores { + v.WriteToMultiVersionStore() + } +} diff --git a/tasks/scheduler_test.go b/tasks/scheduler_test.go new file mode 100644 index 000000000..77193e58c --- /dev/null +++ b/tasks/scheduler_test.go @@ -0,0 +1,289 @@ +package tasks + +import ( + "context" + "errors" + "fmt" + "net/http" + _ "net/http/pprof" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tm-db" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" + + "github.com/cosmos/cosmos-sdk/store/cachekv" + "github.com/cosmos/cosmos-sdk/store/cachemulti" + "github.com/cosmos/cosmos-sdk/store/dbadapter" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/utils/tracing" +) + +type mockDeliverTxFunc func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) + +var testStoreKey = sdk.NewKVStoreKey("mock") +var itemKey = []byte("key") + +func requestList(n int) []*sdk.DeliverTxEntry { + tasks := make([]*sdk.DeliverTxEntry, n) + for i := 0; i < n; i++ { + tasks[i] = &sdk.DeliverTxEntry{ + Request: types.RequestDeliverTx{ + Tx: []byte(fmt.Sprintf("%d", i)), + }, + AbsoluteIndex: i, + // TODO: maybe we need to add dummy sdkTx message types and handler routers too + } + + } + return tasks +} + +func initTestCtx(injectStores bool) sdk.Context { + ctx := sdk.Context{}.WithContext(context.Background()) + keys := make(map[string]sdk.StoreKey) + stores := make(map[sdk.StoreKey]sdk.CacheWrapper) + db := dbm.NewMemDB() + if injectStores { + mem := 
dbadapter.Store{DB: db}
+		stores[testStoreKey] = cachekv.NewStore(mem, testStoreKey, 1000)
+		keys[testStoreKey.Name()] = testStoreKey
+	}
+	store := cachemulti.NewStore(db, stores, keys, nil, nil, nil)
+	ctx = ctx.WithMultiStore(&store)
+	return ctx
+}
+
+func generateTasks(count int) []*deliverTxTask {
+	var res []*deliverTxTask
+	for i := 0; i < count; i++ {
+		res = append(res, &deliverTxTask{Index: i})
+	}
+	return res
+}
+
+func TestProcessAll(t *testing.T) {
+	runtime.SetBlockProfileRate(1)
+
+	go func() {
+		http.ListenAndServe("localhost:6060", nil)
+	}()
+
+	tests := []struct {
+		name          string
+		workers       int
+		runs          int
+		before        func(ctx sdk.Context)
+		requests      []*sdk.DeliverTxEntry
+		deliverTxFunc mockDeliverTxFunc
+		addStores     bool
+		expectedErr   error
+		assertions    func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx)
+	}{
+		{
+			name:      "Test zero txs does not hang",
+			workers:   20,
+			runs:      10,
+			addStores: true,
+			requests:  requestList(0),
+			deliverTxFunc: func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) {
+				panic("should not deliver")
+			},
+			assertions: func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx) {
+				require.Len(t, res, 0)
+			},
+			expectedErr: nil,
+		},
+		{
+			name:      "Test tx writing to a store that another tx is iterating",
+			workers:   50,
+			runs:      1,
+			requests:  requestList(100),
+			addStores: true,
+			before: func(ctx sdk.Context) {
+				kv := ctx.MultiStore().GetKVStore(testStoreKey)
+				// seed 10 values in the base kv store so iterating isn't too fast
+				for i := 0; i < 10; i++ {
+					kv.Set([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d", i)))
+				}
+			},
+			deliverTxFunc: func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) {
+				kv := ctx.MultiStore().GetKVStore(testStoreKey)
+				if ctx.TxIndex()%2 == 0 {
+					// For even-indexed transactions, write to the store
+					kv.Set(req.Tx, req.Tx)
+					return types.ResponseDeliverTx{
+						Info: "write",
+					}
+				} else {
+					// For odd-indexed transactions, iterate over the store
+
+					// just write so we have more writes going on
+					kv.Set(req.Tx, req.Tx)
+					iterator := kv.Iterator(nil, nil)
+					defer iterator.Close()
+					for ; iterator.Valid(); iterator.Next() {
+						// Do nothing, just iterate
+					}
+					return types.ResponseDeliverTx{
+						Info: "iterate",
+					}
+				}
+			},
+			assertions: func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx) {
+				for idx, response := range res {
+					if idx%2 == 0 {
+						require.Equal(t, "write", response.Info)
+					} else {
+						require.Equal(t, "iterate", response.Info)
+					}
+				}
+			},
+			expectedErr: nil,
+		},
+		{
+			name:      "Test no overlap txs",
+			workers:   20,
+			runs:      10,
+			addStores: true,
+			requests:  requestList(1000),
+			deliverTxFunc: func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) {
+				// each tx reads and writes its own key, so there are no conflicts
+				kv := ctx.MultiStore().GetKVStore(testStoreKey)
+
+				// write to the store with this tx's index
+				kv.Set(req.Tx, req.Tx)
+				val := string(kv.Get(req.Tx))
+
+				// return what was read from the store (each tx should read back its own write)
+				return types.ResponseDeliverTx{
+					Info: val,
+				}
+			},
+			assertions: func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx) {
+				for idx, response := range res {
+					require.Equal(t, fmt.Sprintf("%d", idx), response.Info)
+				}
+				store := ctx.MultiStore().GetKVStore(testStoreKey)
+				for i := 0; i < len(res); i++ {
+					val :=
store.Get([]byte(fmt.Sprintf("%d", i))) + require.Equal(t, []byte(fmt.Sprintf("%d", i)), val) + } + }, + expectedErr: nil, + }, + { + name: "Test every tx accesses same key", + workers: 50, + runs: 1, + addStores: true, + requests: requestList(1000), + deliverTxFunc: func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) { + // all txs read and write to the same key to maximize conflicts + kv := ctx.MultiStore().GetKVStore(testStoreKey) + val := string(kv.Get(itemKey)) + + // write to the store with this tx's index + kv.Set(itemKey, req.Tx) + + // return what was read from the store (final attempt should be index-1) + return types.ResponseDeliverTx{ + Info: val, + } + }, + assertions: func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx) { + for idx, response := range res { + if idx == 0 { + require.Equal(t, "", response.Info) + } else { + // the info is what was read from the kv store by the tx + // each tx writes its own index, so the info should be the index of the previous tx + require.Equal(t, fmt.Sprintf("%d", idx-1), response.Info) + } + } + // confirm last write made it to the parent store + latest := ctx.MultiStore().GetKVStore(testStoreKey).Get(itemKey) + require.Equal(t, []byte(fmt.Sprintf("%d", len(res)-1)), latest) + }, + expectedErr: nil, + }, + { + name: "Test some tx accesses same key", + workers: 50, + runs: 1, + addStores: true, + requests: requestList(2000), + deliverTxFunc: func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) types.ResponseDeliverTx { + if ctx.TxIndex()%10 != 0 { + return types.ResponseDeliverTx{ + Info: "none", + } + } + // all txs read and write to the same key to maximize conflicts + kv := ctx.MultiStore().GetKVStore(testStoreKey) + val := string(kv.Get(itemKey)) + + // write to the store with this tx's index + kv.Set(itemKey, req.Tx) + + // return what was read from the store (final attempt should be index-1) + return types.ResponseDeliverTx{ + Info: val, + } + }, + assertions: func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx) {}, + expectedErr: nil, + }, + { + name: "Test no stores on context should not panic", + workers: 50, + runs: 10, + addStores: false, + requests: requestList(10), + deliverTxFunc: func(ctx sdk.Context, req types.RequestDeliverTx, tx sdk.Tx, checksum [32]byte) (res types.ResponseDeliverTx) { + return types.ResponseDeliverTx{ + Info: fmt.Sprintf("%d", ctx.TxIndex()), + } + }, + assertions: func(t *testing.T, ctx sdk.Context, res []types.ResponseDeliverTx) { + for idx, response := range res { + require.Equal(t, fmt.Sprintf("%d", idx), response.Info) + } + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for i := 0; i < tt.runs; i++ { + // set a tracer provider + tp := trace.NewNoopTracerProvider() + otel.SetTracerProvider(trace.NewNoopTracerProvider()) + tr := tp.Tracer("scheduler-test") + ti := &tracing.Info{ + Tracer: &tr, + } + + s := NewScheduler(tt.workers, ti, tt.deliverTxFunc) + ctx := initTestCtx(tt.addStores) + + if tt.before != nil { + tt.before(ctx) + } + + res, err := s.ProcessAll(ctx, tt.requests) + require.Len(t, res, len(tt.requests)) + + if !errors.Is(err, tt.expectedErr) { + t.Errorf("Expected error %v, got %v", tt.expectedErr, err) + } else { + tt.assertions(t, ctx, res) + } + } + }) + } +} diff --git a/types/accesscontrol/constants.pb.go b/types/accesscontrol/constants.pb.go index 1625561ed..799d51d50 100644 --- 
a/types/accesscontrol/constants.pb.go +++ b/types/accesscontrol/constants.pb.go @@ -195,101 +195,131 @@ const ( ResourceType_KV_DEX_SHORT_ORDER_COUNT ResourceType = 92 ResourceType_KV_BANK_DEFERRED ResourceType = 93 ResourceType_KV_BANK_DEFERRED_MODULE_TX_INDEX ResourceType = 95 + ResourceType_KV_EVM ResourceType = 96 + ResourceType_KV_EVM_BALANCE ResourceType = 97 + ResourceType_KV_EVM_TRANSIENT ResourceType = 98 + ResourceType_KV_EVM_ACCOUNT_TRANSIENT ResourceType = 99 + ResourceType_KV_EVM_MODULE_TRANSIENT ResourceType = 100 + ResourceType_KV_EVM_NONCE ResourceType = 101 + ResourceType_KV_EVM_RECEIPT ResourceType = 102 + ResourceType_KV_EVM_S2E ResourceType = 103 + ResourceType_KV_EVM_E2S ResourceType = 104 + ResourceType_KV_EVM_CODE_HASH ResourceType = 105 + ResourceType_KV_EVM_CODE ResourceType = 106 + ResourceType_KV_EVM_CODE_SIZE ResourceType = 107 + ResourceType_KV_BANK_WEI_BALANCE ResourceType = 108 + ResourceType_KV_DEX_MEM_CONTRACTS_TO_PROCESS ResourceType = 109 + ResourceType_KV_DEX_MEM_DOWNSTREAM_CONTRACTS ResourceType = 110 ) var ResourceType_name = map[int32]string{ - 0: "ANY", - 1: "KV", - 2: "Mem", - 3: "DexMem", - 4: "KV_BANK", - 5: "KV_STAKING", - 6: "KV_WASM", - 7: "KV_ORACLE", - 8: "KV_DEX", - 9: "KV_EPOCH", - 10: "KV_TOKENFACTORY", - 11: "KV_ORACLE_VOTE_TARGETS", - 12: "KV_ORACLE_AGGREGATE_VOTES", - 13: "KV_ORACLE_FEEDERS", - 14: "KV_STAKING_DELEGATION", - 15: "KV_STAKING_VALIDATOR", - 16: "KV_AUTH", - 17: "KV_AUTH_ADDRESS_STORE", - 18: "KV_BANK_SUPPLY", - 19: "KV_BANK_DENOM", - 20: "KV_BANK_BALANCES", - 21: "KV_TOKENFACTORY_DENOM", - 22: "KV_TOKENFACTORY_METADATA", - 23: "KV_TOKENFACTORY_ADMIN", - 24: "KV_TOKENFACTORY_CREATOR", - 25: "KV_ORACLE_EXCHANGE_RATE", - 26: "KV_ORACLE_VOTE_PENALTY_COUNTER", - 27: "KV_ORACLE_PRICE_SNAPSHOT", - 28: "KV_STAKING_VALIDATION_POWER", - 29: "KV_STAKING_TOTAL_POWER", - 30: "KV_STAKING_VALIDATORS_CON_ADDR", - 31: "KV_STAKING_UNBONDING_DELEGATION", - 32: "KV_STAKING_UNBONDING_DELEGATION_VAL", - 33: "KV_STAKING_REDELEGATION", - 34: "KV_STAKING_REDELEGATION_VAL_SRC", - 35: "KV_STAKING_REDELEGATION_VAL_DST", - 36: "KV_STAKING_REDELEGATION_QUEUE", - 37: "KV_STAKING_VALIDATOR_QUEUE", - 38: "KV_STAKING_HISTORICAL_INFO", - 39: "KV_STAKING_UNBONDING", - 41: "KV_STAKING_VALIDATORS_BY_POWER", - 40: "KV_DISTRIBUTION", - 42: "KV_DISTRIBUTION_FEE_POOL", - 43: "KV_DISTRIBUTION_PROPOSER_KEY", - 44: "KV_DISTRIBUTION_OUTSTANDING_REWARDS", - 45: "KV_DISTRIBUTION_DELEGATOR_WITHDRAW_ADDR", - 46: "KV_DISTRIBUTION_DELEGATOR_STARTING_INFO", - 47: "KV_DISTRIBUTION_VAL_HISTORICAL_REWARDS", - 48: "KV_DISTRIBUTION_VAL_CURRENT_REWARDS", - 49: "KV_DISTRIBUTION_VAL_ACCUM_COMMISSION", - 50: "KV_DISTRIBUTION_SLASH_EVENT", - 51: "KV_DEX_CONTRACT_LONGBOOK", - 52: "KV_DEX_CONTRACT_SHORTBOOK", - 53: "KV_DEX_SETTLEMENT", - 54: "KV_DEX_PAIR_PREFIX", - 55: "KV_DEX_TWAP", - 56: "KV_DEX_PRICE", - 57: "KV_DEX_SETTLEMENT_ENTRY", - 58: "KV_DEX_REGISTERED_PAIR", - 60: "KV_DEX_ORDER", - 61: "KV_DEX_CANCEL", - 62: "KV_DEX_ACCOUNT_ACTIVE_ORDERS", - 64: "KV_DEX_ASSET_LIST", - 65: "KV_DEX_NEXT_ORDER_ID", - 66: "KV_DEX_NEXT_SETTLEMENT_ID", - 67: "KV_DEX_MATCH_RESULT", - 68: "KV_DEX_SETTLEMENT_ORDER_ID", - 69: "KV_DEX_ORDER_BOOK", - 71: "KV_ACCESSCONTROL", - 72: "KV_ACCESSCONTROL_WASM_DEPENDENCY_MAPPING", - 73: "KV_WASM_CODE", - 74: "KV_WASM_CONTRACT_ADDRESS", - 75: "KV_WASM_CONTRACT_STORE", - 76: "KV_WASM_SEQUENCE_KEY", - 77: "KV_WASM_CONTRACT_CODE_HISTORY", - 78: "KV_WASM_CONTRACT_BY_CODE_ID", - 79: "KV_WASM_PINNED_CODE_INDEX", - 80: "KV_AUTH_GLOBAL_ACCOUNT_NUMBER", - 81: "KV_AUTHZ", 
- 82: "KV_FEEGRANT", - 83: "KV_FEEGRANT_ALLOWANCE", - 84: "KV_SLASHING", - 85: "KV_SLASHING_VAL_SIGNING_INFO", - 86: "KV_SLASHING_ADDR_PUBKEY_RELATION_KEY", - 87: "KV_DEX_MEM_ORDER", - 88: "KV_DEX_MEM_CANCEL", - 89: "KV_DEX_MEM_DEPOSIT", - 90: "KV_DEX_CONTRACT", - 91: "KV_DEX_LONG_ORDER_COUNT", - 92: "KV_DEX_SHORT_ORDER_COUNT", - 93: "KV_BANK_DEFERRED", - 95: "KV_BANK_DEFERRED_MODULE_TX_INDEX", + 0: "ANY", + 1: "KV", + 2: "Mem", + 3: "DexMem", + 4: "KV_BANK", + 5: "KV_STAKING", + 6: "KV_WASM", + 7: "KV_ORACLE", + 8: "KV_DEX", + 9: "KV_EPOCH", + 10: "KV_TOKENFACTORY", + 11: "KV_ORACLE_VOTE_TARGETS", + 12: "KV_ORACLE_AGGREGATE_VOTES", + 13: "KV_ORACLE_FEEDERS", + 14: "KV_STAKING_DELEGATION", + 15: "KV_STAKING_VALIDATOR", + 16: "KV_AUTH", + 17: "KV_AUTH_ADDRESS_STORE", + 18: "KV_BANK_SUPPLY", + 19: "KV_BANK_DENOM", + 20: "KV_BANK_BALANCES", + 21: "KV_TOKENFACTORY_DENOM", + 22: "KV_TOKENFACTORY_METADATA", + 23: "KV_TOKENFACTORY_ADMIN", + 24: "KV_TOKENFACTORY_CREATOR", + 25: "KV_ORACLE_EXCHANGE_RATE", + 26: "KV_ORACLE_VOTE_PENALTY_COUNTER", + 27: "KV_ORACLE_PRICE_SNAPSHOT", + 28: "KV_STAKING_VALIDATION_POWER", + 29: "KV_STAKING_TOTAL_POWER", + 30: "KV_STAKING_VALIDATORS_CON_ADDR", + 31: "KV_STAKING_UNBONDING_DELEGATION", + 32: "KV_STAKING_UNBONDING_DELEGATION_VAL", + 33: "KV_STAKING_REDELEGATION", + 34: "KV_STAKING_REDELEGATION_VAL_SRC", + 35: "KV_STAKING_REDELEGATION_VAL_DST", + 36: "KV_STAKING_REDELEGATION_QUEUE", + 37: "KV_STAKING_VALIDATOR_QUEUE", + 38: "KV_STAKING_HISTORICAL_INFO", + 39: "KV_STAKING_UNBONDING", + 41: "KV_STAKING_VALIDATORS_BY_POWER", + 40: "KV_DISTRIBUTION", + 42: "KV_DISTRIBUTION_FEE_POOL", + 43: "KV_DISTRIBUTION_PROPOSER_KEY", + 44: "KV_DISTRIBUTION_OUTSTANDING_REWARDS", + 45: "KV_DISTRIBUTION_DELEGATOR_WITHDRAW_ADDR", + 46: "KV_DISTRIBUTION_DELEGATOR_STARTING_INFO", + 47: "KV_DISTRIBUTION_VAL_HISTORICAL_REWARDS", + 48: "KV_DISTRIBUTION_VAL_CURRENT_REWARDS", + 49: "KV_DISTRIBUTION_VAL_ACCUM_COMMISSION", + 50: "KV_DISTRIBUTION_SLASH_EVENT", + 51: "KV_DEX_CONTRACT_LONGBOOK", + 52: "KV_DEX_CONTRACT_SHORTBOOK", + 53: "KV_DEX_SETTLEMENT", + 54: "KV_DEX_PAIR_PREFIX", + 55: "KV_DEX_TWAP", + 56: "KV_DEX_PRICE", + 57: "KV_DEX_SETTLEMENT_ENTRY", + 58: "KV_DEX_REGISTERED_PAIR", + 60: "KV_DEX_ORDER", + 61: "KV_DEX_CANCEL", + 62: "KV_DEX_ACCOUNT_ACTIVE_ORDERS", + 64: "KV_DEX_ASSET_LIST", + 65: "KV_DEX_NEXT_ORDER_ID", + 66: "KV_DEX_NEXT_SETTLEMENT_ID", + 67: "KV_DEX_MATCH_RESULT", + 68: "KV_DEX_SETTLEMENT_ORDER_ID", + 69: "KV_DEX_ORDER_BOOK", + 71: "KV_ACCESSCONTROL", + 72: "KV_ACCESSCONTROL_WASM_DEPENDENCY_MAPPING", + 73: "KV_WASM_CODE", + 74: "KV_WASM_CONTRACT_ADDRESS", + 75: "KV_WASM_CONTRACT_STORE", + 76: "KV_WASM_SEQUENCE_KEY", + 77: "KV_WASM_CONTRACT_CODE_HISTORY", + 78: "KV_WASM_CONTRACT_BY_CODE_ID", + 79: "KV_WASM_PINNED_CODE_INDEX", + 80: "KV_AUTH_GLOBAL_ACCOUNT_NUMBER", + 81: "KV_AUTHZ", + 82: "KV_FEEGRANT", + 83: "KV_FEEGRANT_ALLOWANCE", + 84: "KV_SLASHING", + 85: "KV_SLASHING_VAL_SIGNING_INFO", + 86: "KV_SLASHING_ADDR_PUBKEY_RELATION_KEY", + 87: "KV_DEX_MEM_ORDER", + 88: "KV_DEX_MEM_CANCEL", + 89: "KV_DEX_MEM_DEPOSIT", + 90: "KV_DEX_CONTRACT", + 91: "KV_DEX_LONG_ORDER_COUNT", + 92: "KV_DEX_SHORT_ORDER_COUNT", + 93: "KV_BANK_DEFERRED", + 95: "KV_BANK_DEFERRED_MODULE_TX_INDEX", + 96: "KV_EVM", + 97: "KV_EVM_BALANCE", + 98: "KV_EVM_TRANSIENT", + 99: "KV_EVM_ACCOUNT_TRANSIENT", + 100: "KV_EVM_MODULE_TRANSIENT", + 101: "KV_EVM_NONCE", + 102: "KV_EVM_RECEIPT", + 103: "KV_EVM_S2E", + 104: "KV_EVM_E2S", + 105: "KV_EVM_CODE_HASH", + 106: "KV_EVM_CODE", + 107: "KV_EVM_CODE_SIZE", + 
108: "KV_BANK_WEI_BALANCE", + 109: "KV_DEX_MEM_CONTRACTS_TO_PROCESS", + 110: "KV_DEX_MEM_DOWNSTREAM_CONTRACTS", } var ResourceType_value = map[string]int32{ @@ -385,6 +415,21 @@ var ResourceType_value = map[string]int32{ "KV_DEX_SHORT_ORDER_COUNT": 92, "KV_BANK_DEFERRED": 93, "KV_BANK_DEFERRED_MODULE_TX_INDEX": 95, + "KV_EVM": 96, + "KV_EVM_BALANCE": 97, + "KV_EVM_TRANSIENT": 98, + "KV_EVM_ACCOUNT_TRANSIENT": 99, + "KV_EVM_MODULE_TRANSIENT": 100, + "KV_EVM_NONCE": 101, + "KV_EVM_RECEIPT": 102, + "KV_EVM_S2E": 103, + "KV_EVM_E2S": 104, + "KV_EVM_CODE_HASH": 105, + "KV_EVM_CODE": 106, + "KV_EVM_CODE_SIZE": 107, + "KV_BANK_WEI_BALANCE": 108, + "KV_DEX_MEM_CONTRACTS_TO_PROCESS": 109, + "KV_DEX_MEM_DOWNSTREAM_CONTRACTS": 110, } func (x ResourceType) String() string { @@ -432,96 +477,105 @@ func init() { } var fileDescriptor_36568f7561081112 = []byte{ - // 1445 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x56, 0x5b, 0x73, 0x13, 0xbf, - 0x15, 0xcf, 0xfd, 0xa2, 0x04, 0x38, 0x28, 0xdc, 0x13, 0x0c, 0x84, 0x14, 0x68, 0x80, 0x84, 0x4b, - 0xaf, 0xd0, 0x96, 0xca, 0xab, 0x63, 0x7b, 0xe3, 0x5d, 0x69, 0x2d, 0x69, 0x7d, 0xa1, 0xed, 0x68, - 0x12, 0xd7, 0x43, 0x99, 0x92, 0x98, 0x89, 0x4d, 0xa7, 0xfd, 0x0c, 0x7d, 0xe9, 0xc7, 0xea, 0x23, - 0x8f, 0x7d, 0x64, 0xe0, 0x8b, 0x74, 0xb4, 0x2b, 0x9b, 0xb5, 0x09, 0x7f, 0x9e, 0x12, 0x9f, 0xdf, - 0x4f, 0x67, 0x75, 0x7e, 0xe7, 0x26, 0xb2, 0xd3, 0xed, 0x0f, 0x8e, 0xfb, 0x83, 0xfd, 0xc3, 0x6e, - 0xb7, 0x37, 0x18, 0x74, 0xfb, 0x27, 0xc3, 0xd3, 0xfe, 0xbb, 0xfd, 0x6e, 0xff, 0x64, 0x30, 0x3c, - 0x3c, 0x19, 0x0e, 0xf6, 0xde, 0x9f, 0xf6, 0x87, 0x7d, 0xba, 0x95, 0xb3, 0xf6, 0x26, 0x58, 0x7b, - 0xff, 0x78, 0x7a, 0xd4, 0x1b, 0x1e, 0x3e, 0xdd, 0x7d, 0x41, 0x08, 0xcb, 0x00, 0xf3, 0xaf, 0xf7, - 0x3d, 0xba, 0x46, 0x96, 0x53, 0x51, 0x17, 0xb2, 0x25, 0x60, 0x86, 0xae, 0x90, 0x05, 0x85, 0x8c, - 0xc3, 0x2c, 0x5d, 0x25, 0x8b, 0x2d, 0x15, 0x1a, 0x84, 0x39, 0x4a, 0xc8, 0x52, 0x20, 0xe3, 0x38, - 0x34, 0x30, 0xbf, 0xfb, 0xef, 0x39, 0xb2, 0x99, 0x1f, 0x96, 0xef, 0x7b, 0xa7, 0x87, 0xc3, 0xb7, - 0xfd, 0x13, 0xdd, 0x7b, 0xd7, 0xeb, 0x0e, 0xfb, 0xa7, 0x99, 0xb7, 0x15, 0xb2, 0x20, 0xa4, 0x40, - 0x98, 0xa1, 0x4b, 0x64, 0xee, 0xa0, 0x01, 0xb3, 0xf4, 0x32, 0xb9, 0x78, 0xd0, 0xb0, 0x65, 0x0c, - 0x6a, 0xcf, 0x9f, 0x59, 0xc6, 0xb9, 0x42, 0xad, 0x61, 0x8e, 0x96, 0xc8, 0x8d, 0x83, 0x86, 0x8d, - 0x50, 0x54, 0x4d, 0xcd, 0x26, 0x0a, 0x2b, 0x61, 0x1b, 0xf9, 0x18, 0x9f, 0xa7, 0xd7, 0xc9, 0x65, - 0x8d, 0x82, 0xa3, 0x9a, 0x3e, 0xba, 0x40, 0xb7, 0x49, 0xc9, 0x43, 0xdf, 0x3b, 0xbe, 0x48, 0x2f, - 0x11, 0x08, 0xa4, 0x30, 0x8a, 0x05, 0x66, 0x6c, 0x5d, 0xa2, 0x37, 0xc8, 0x95, 0x83, 0x86, 0x8d, - 0x51, 0x6b, 0x56, 0x45, 0x1b, 0x48, 0xc1, 0x43, 0x13, 0x4a, 0xc1, 0x22, 0x58, 0x76, 0x58, 0x20, - 0x85, 0x36, 0x4c, 0x18, 0xab, 0x8d, 0x0a, 0x45, 0xd5, 0x1a, 0x69, 0x6b, 0xd8, 0x86, 0x15, 0x7a, - 0x85, 0xd0, 0xb1, 0x37, 0x85, 0x15, 0x54, 0x28, 0x02, 0x84, 0xd5, 0xdd, 0x4f, 0x1b, 0x64, 0x5d, - 0xf5, 0x06, 0xfd, 0x0f, 0xa7, 0xdd, 0x5e, 0x16, 0xfe, 0x32, 0x99, 0x67, 0xa2, 0x93, 0x47, 0x5f, - 0x6f, 0xc2, 0xac, 0x33, 0xc4, 0xbd, 0xe3, 0x5c, 0x44, 0xde, 0xfb, 0xa7, 0xfb, 0x7f, 0xde, 0x49, - 0x5e, 0x6f, 0xda, 0x32, 0x13, 0x75, 0x58, 0xa0, 0xe7, 0x09, 0xa9, 0x37, 0xad, 0x36, 0xac, 0x1e, - 0x8a, 0x2a, 0x2c, 0x7a, 0xb0, 0xc5, 0x74, 0x0c, 0x4b, 0xf4, 0x1c, 0x59, 0xad, 0x37, 0xad, 0x54, - 0x2c, 0x88, 0x10, 0x96, 0x9d, 0x93, 0x7a, 0xd3, 0xf2, 0xec, 0x4e, 0xeb, 0x64, 0xa5, 0xde, 0xb4, - 0x98, 0xc8, 0xa0, 0x06, 0xab, 0x74, 0x83, 0x5c, 0xa8, 0x37, 0xad, 0x91, 0x75, 0x14, 0x15, 0x16, - 
0x18, 0xa9, 0x3a, 0x40, 0x5c, 0x48, 0xe3, 0xd3, 0xb6, 0x29, 0x0d, 0x5a, 0xc3, 0x54, 0x15, 0x8d, - 0x86, 0x35, 0x7a, 0x93, 0x5c, 0xff, 0x8a, 0xb1, 0x6a, 0x55, 0x61, 0x95, 0x99, 0x9c, 0xa5, 0x61, - 0xdd, 0x65, 0xed, 0x2b, 0x5c, 0x41, 0xe4, 0xa8, 0x34, 0x9c, 0x73, 0x59, 0xf9, 0x7a, 0x59, 0xcb, - 0x31, 0x72, 0xa7, 0x42, 0x29, 0xe0, 0x3c, 0xbd, 0x46, 0x2e, 0x15, 0xa0, 0x26, 0x8b, 0x42, 0xce, - 0x8c, 0x54, 0x70, 0xc1, 0x47, 0xc4, 0x52, 0x53, 0x03, 0xf0, 0x1e, 0xdc, 0x8f, 0x51, 0x5e, 0xac, - 0x36, 0x52, 0x21, 0x5c, 0xa4, 0x94, 0x9c, 0xf7, 0xb2, 0x58, 0x9d, 0x26, 0x49, 0xd4, 0x01, 0x4a, - 0x2f, 0x92, 0x73, 0x23, 0x1b, 0x47, 0x21, 0x63, 0xd8, 0x70, 0xa9, 0x1d, 0x99, 0xca, 0x2c, 0x62, - 0x22, 0x40, 0x0d, 0x97, 0xbc, 0xdf, 0xa2, 0x00, 0xfe, 0xc0, 0x65, 0xba, 0x45, 0xae, 0x4d, 0x43, - 0x31, 0x1a, 0xc6, 0x99, 0x61, 0x70, 0xe5, 0xac, 0x83, 0x8c, 0xc7, 0xa1, 0x80, 0xab, 0x74, 0x93, - 0x5c, 0x9d, 0x86, 0x02, 0x85, 0x59, 0x54, 0xd7, 0x3c, 0xe8, 0x15, 0xc2, 0x76, 0x50, 0x63, 0xa2, - 0x8a, 0x56, 0x31, 0x83, 0x70, 0xdd, 0x95, 0xe8, 0x94, 0xf2, 0x09, 0x0a, 0x16, 0x99, 0x8e, 0x0d, - 0x64, 0x2a, 0x0c, 0x2a, 0xb8, 0xe1, 0xaf, 0xe5, 0x39, 0x89, 0x0a, 0x03, 0xb4, 0x5a, 0xb0, 0x44, - 0xd7, 0xa4, 0x81, 0x4d, 0x7a, 0x8b, 0x6c, 0x7e, 0x2b, 0x67, 0x28, 0x85, 0x4d, 0x64, 0x0b, 0x15, - 0x6c, 0xf9, 0xe4, 0x8e, 0x08, 0x46, 0x1a, 0x16, 0x79, 0xec, 0xa6, 0xff, 0xfc, 0x37, 0xb9, 0xd0, - 0xae, 0xe4, 0x33, 0xd9, 0xa1, 0x44, 0xef, 0x92, 0x5b, 0x05, 0x4e, 0x2a, 0xca, 0xae, 0x1b, 0x26, - 0x93, 0x7a, 0x8b, 0xde, 0x27, 0x77, 0x7f, 0x40, 0x72, 0xde, 0xe1, 0xb6, 0x57, 0x63, 0x44, 0x54, - 0x58, 0xf0, 0x72, 0x67, 0xea, 0x53, 0x45, 0xd0, 0x9d, 0xb6, 0x5a, 0x05, 0xb0, 0xfd, 0x23, 0x12, - 0xd7, 0x06, 0xee, 0xd2, 0x3b, 0xe4, 0xe6, 0xf7, 0x48, 0x8d, 0x14, 0x53, 0x84, 0x1d, 0x37, 0x58, - 0xce, 0x8a, 0xdd, 0xe3, 0x3f, 0x9b, 0xc2, 0x6b, 0xa1, 0xab, 0xbe, 0x30, 0x60, 0x91, 0x0d, 0x45, - 0x45, 0xc2, 0xbd, 0xa9, 0x3a, 0x1e, 0x87, 0x0c, 0xf7, 0xbf, 0xaf, 0x6a, 0xb9, 0xe3, 0x95, 0xff, - 0xb9, 0xef, 0x43, 0x1e, 0xba, 0x09, 0x52, 0x4e, 0xb3, 0xf8, 0x1f, 0xf8, 0x4c, 0x17, 0x8d, 0xae, - 0xa5, 0x6c, 0x22, 0x65, 0x04, 0xbb, 0xf4, 0x36, 0xd9, 0x9a, 0x46, 0x13, 0x25, 0x13, 0xa9, 0x51, - 0xd9, 0x3a, 0x76, 0xe0, 0xa1, 0xcf, 0xc2, 0x04, 0x43, 0xa6, 0xc6, 0x8d, 0x2a, 0x9e, 0xcb, 0xd0, - 0x62, 0x8a, 0x6b, 0x78, 0x44, 0x1f, 0x92, 0xfb, 0xd3, 0x44, 0xaf, 0x90, 0x54, 0xb6, 0x15, 0x9a, - 0x1a, 0x57, 0xac, 0x95, 0x17, 0xc0, 0xe3, 0x9f, 0x26, 0x6b, 0xc3, 0x94, 0x71, 0xce, 0x33, 0x55, - 0xf6, 0xe8, 0x2e, 0xb9, 0x37, 0x4d, 0x76, 0x59, 0x29, 0xc8, 0x37, 0xba, 0xc5, 0xfe, 0x59, 0xd7, - 0x75, 0xdc, 0x20, 0x55, 0x0a, 0x85, 0x19, 0x13, 0x9f, 0xd0, 0x07, 0x64, 0xe7, 0x2c, 0x22, 0x0b, - 0x82, 0x34, 0xb6, 0xd9, 0xca, 0xd1, 0xda, 0x29, 0xf8, 0xd4, 0x77, 0xc3, 0x04, 0x53, 0x47, 0x4c, - 0xd7, 0x2c, 0x36, 0x51, 0x18, 0x78, 0x36, 0x92, 0x18, 0xdb, 0x76, 0x3c, 0xa8, 0x23, 0x29, 0xaa, - 0x65, 0x29, 0xeb, 0xf0, 0xdc, 0x0f, 0xbb, 0x09, 0x54, 0xd7, 0xa4, 0x32, 0x19, 0xfc, 0x0b, 0x3f, - 0xec, 0x1c, 0xac, 0xd1, 0x98, 0x08, 0x63, 0xe7, 0xf3, 0x97, 0x6e, 0xea, 0x7b, 0x73, 0xc2, 0x42, - 0xe5, 0xb7, 0x0c, 0xfc, 0x8a, 0x5e, 0x20, 0x6b, 0xde, 0x6e, 0x5a, 0x2c, 0x81, 0x5f, 0x53, 0x20, - 0xeb, 0x23, 0xa2, 0x6b, 0x63, 0xf8, 0x8d, 0x6f, 0x87, 0x49, 0x8f, 0x16, 0x85, 0x51, 0x1d, 0xf8, - 0xad, 0xef, 0x5c, 0x07, 0x2a, 0xac, 0x86, 0xda, 0xa0, 0x42, 0x9e, 0x7d, 0x02, 0x5e, 0x14, 0x5c, - 0x49, 0xc5, 0x51, 0xc1, 0xef, 0xfc, 0x04, 0xcc, 0xee, 0xee, 0x66, 0x5d, 0x04, 0xbf, 0x1f, 0x55, - 0x0c, 0xb6, 0x9d, 0x54, 0x6e, 0x9e, 0x58, 0x16, 0x98, 0xb0, 0x89, 0xf9, 0x19, 0x0d, 0x7f, 0x28, - 0x44, 0xc4, 0xb4, 0x46, 
0x63, 0xa3, 0x50, 0x1b, 0xf8, 0xa3, 0xaf, 0x6d, 0x67, 0x16, 0xd8, 0x36, - 0x39, 0xdd, 0x86, 0x1c, 0x58, 0x41, 0xa1, 0x0c, 0x29, 0xdc, 0x3a, 0xe4, 0x50, 0xa6, 0x57, 0xc9, - 0x86, 0x87, 0x63, 0x66, 0x82, 0x9a, 0x55, 0xa8, 0xd3, 0xc8, 0x40, 0xe0, 0xbb, 0x69, 0x2a, 0xd0, - 0xb1, 0x5f, 0x5e, 0xb8, 0x48, 0x6e, 0xcc, 0x14, 0x47, 0x3f, 0xc3, 0x59, 0x10, 0xa0, 0xd6, 0x59, - 0x4a, 0x64, 0x04, 0x55, 0xfa, 0x88, 0x3c, 0x98, 0xb6, 0x66, 0x8b, 0xd0, 0x72, 0x4c, 0xdc, 0xc2, - 0x17, 0x41, 0xc7, 0xc6, 0x2c, 0x49, 0x5c, 0x3b, 0xd6, 0xbc, 0x54, 0x19, 0x1e, 0x48, 0x8e, 0x10, - 0xfa, 0x22, 0xf0, 0x96, 0xa9, 0xe5, 0x7f, 0xe0, 0x65, 0x9f, 0x44, 0xf3, 0xd5, 0x53, 0xf7, 0xc2, - 0x64, 0x98, 0xc6, 0x46, 0xea, 0xd6, 0x7b, 0xd6, 0x7b, 0x91, 0x9f, 0x38, 0x93, 0xa7, 0xdc, 0xe7, - 0x7c, 0xe9, 0x77, 0x20, 0xf6, 0xc5, 0x39, 0x49, 0x29, 0x77, 0x72, 0x56, 0xc8, 0x41, 0x78, 0x71, - 0x33, 0x42, 0x12, 0x0a, 0x81, 0xdc, 0x63, 0xc2, 0x6d, 0x72, 0xe9, 0x3f, 0x91, 0xad, 0xc4, 0x6a, - 0x24, 0xcb, 0x79, 0x07, 0x64, 0x69, 0x15, 0x69, 0x5c, 0x46, 0x05, 0x89, 0x5f, 0xf6, 0x8e, 0xf2, - 0x1a, 0x1a, 0xbe, 0x00, 0x2b, 0x88, 0x55, 0xc5, 0x84, 0x01, 0xe5, 0x77, 0xd8, 0xc8, 0x60, 0x59, - 0x14, 0xc9, 0x96, 0x2b, 0x16, 0xd0, 0x9e, 0x9b, 0x35, 0x8b, 0x93, 0xcd, 0xf8, 0xe2, 0x19, 0x19, - 0xf2, 0x01, 0x1c, 0x56, 0xc5, 0xb8, 0xd7, 0x53, 0xdf, 0x96, 0x63, 0x86, 0x53, 0xd0, 0x26, 0x69, - 0xb9, 0x8e, 0x1d, 0xab, 0x30, 0xca, 0xa7, 0xad, 0x13, 0xa7, 0xe9, 0xd3, 0x98, 0x95, 0x05, 0xc6, - 0xbe, 0x62, 0x5b, 0x85, 0x9c, 0x3b, 0xab, 0xaf, 0xda, 0x76, 0xa1, 0x9d, 0x9c, 0x99, 0x63, 0x22, - 0x75, 0x68, 0xa0, 0x33, 0x1a, 0x99, 0x85, 0xe6, 0x84, 0xd7, 0x85, 0x06, 0x72, 0x6d, 0xec, 0x8b, - 0x27, 0x13, 0x05, 0xfe, 0x54, 0x68, 0xf6, 0xac, 0x8b, 0x27, 0xd0, 0x3f, 0x17, 0xdf, 0x07, 0xdc, - 0xbd, 0xd5, 0x14, 0x72, 0xf8, 0x0b, 0xdd, 0x21, 0xb7, 0xa7, 0xad, 0x36, 0x96, 0x3c, 0x8d, 0xd0, - 0x9a, 0xb6, 0x4f, 0x85, 0xdd, 0x5e, 0x58, 0x79, 0x09, 0x2f, 0xb7, 0x17, 0x56, 0x5e, 0xc1, 0xab, - 0xed, 0x85, 0x95, 0x0a, 0x54, 0x76, 0x1f, 0x11, 0xda, 0x3a, 0x1c, 0x1c, 0xc7, 0xbd, 0xc1, 0xe0, - 0xf0, 0x4d, 0x4f, 0x7f, 0x38, 0x1a, 0xba, 0x77, 0xde, 0x2a, 0x59, 0x6c, 0xa4, 0xa8, 0xdc, 0x4b, - 0x6f, 0x8d, 0x2c, 0x63, 0x1b, 0x83, 0xd4, 0x20, 0xcc, 0x96, 0x0f, 0xfe, 0xfb, 0xb9, 0x34, 0xfb, - 0xf1, 0x73, 0x69, 0xf6, 0xd3, 0xe7, 0xd2, 0xec, 0x7f, 0xbe, 0x94, 0x66, 0x3e, 0x7e, 0x29, 0xcd, - 0xfc, 0xef, 0x4b, 0x69, 0xe6, 0xf5, 0x93, 0x37, 0x6f, 0x87, 0x7f, 0xfb, 0x70, 0xb4, 0xd7, 0xed, - 0x1f, 0xef, 0xfb, 0x37, 0x7c, 0xfe, 0xe7, 0xf1, 0xe0, 0xaf, 0x7f, 0xdf, 0x77, 0x4e, 0xa7, 0x1e, - 0xf5, 0x47, 0x4b, 0xd9, 0x5b, 0xfe, 0xf9, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x19, 0x02, 0xe2, - 0x5e, 0xf3, 0x0b, 0x00, 0x00, + // 1586 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x57, 0x59, 0x77, 0x1b, 0xb7, + 0x15, 0x96, 0x6c, 0x59, 0x0b, 0xbc, 0x5d, 0xc3, 0xfb, 0x26, 0x3b, 0xb2, 0x1b, 0xbb, 0x8a, 0x23, + 0xc5, 0x76, 0xd7, 0xa4, 0x6d, 0x0a, 0x0e, 0xae, 0xc8, 0x11, 0x67, 0x80, 0x11, 0x80, 0xe1, 0xe2, + 0xb6, 0x41, 0x25, 0x86, 0x75, 0xdc, 0x58, 0xa2, 0x8f, 0x48, 0xf7, 0xb4, 0xbf, 0xa1, 0x2f, 0xfd, + 0x43, 0x7d, 0xef, 0x63, 0x1e, 0xfb, 0xd8, 0x63, 0xff, 0x91, 0x1e, 0xcc, 0x5c, 0xd2, 0xc3, 0x89, + 0x9d, 0x3c, 0x49, 0xbc, 0xdf, 0x87, 0x0b, 0xe0, 0xbb, 0x1b, 0x86, 0xdd, 0x1f, 0x8c, 0xc6, 0x87, + 0xa3, 0xf1, 0xf6, 0xfe, 0x60, 0x30, 0x1c, 0x8f, 0x07, 0xa3, 0xa3, 0xc9, 0xf1, 0xe8, 0xe5, 0xf6, + 0x60, 0x74, 0x34, 0x9e, 0xec, 0x1f, 0x4d, 0xc6, 0x5b, 0xaf, 0x8e, 0x47, 0x93, 0x11, 0xbf, 0x55, + 0xb2, 0xb6, 0xe6, 0x58, 0x5b, 0x7f, 0x7b, 0x7c, 0x30, 0x9c, 0xec, 
0x3f, 0xde, 0xfc, 0x9c, 0x31, + 0x51, 0x00, 0xee, 0x1f, 0xaf, 0x86, 0xfc, 0x34, 0x5b, 0xc9, 0x55, 0x5b, 0xe9, 0xae, 0x82, 0x05, + 0xbe, 0xca, 0x96, 0x0c, 0x0a, 0x09, 0x8b, 0x7c, 0x8d, 0x9d, 0xea, 0x9a, 0xd8, 0x21, 0x9c, 0xe0, + 0x8c, 0x2d, 0x47, 0x3a, 0x4d, 0x63, 0x07, 0x27, 0x37, 0xff, 0x79, 0x82, 0xdd, 0x2c, 0x17, 0xeb, + 0x57, 0xc3, 0xe3, 0xfd, 0xc9, 0x8b, 0xd1, 0x91, 0x1d, 0xbe, 0x1c, 0x0e, 0x26, 0xa3, 0xe3, 0xc2, + 0xdb, 0x2a, 0x5b, 0x52, 0x5a, 0x21, 0x2c, 0xf0, 0x65, 0x76, 0x62, 0x77, 0x0f, 0x16, 0xf9, 0x65, + 0x76, 0x61, 0x77, 0xcf, 0x37, 0x30, 0x6a, 0x3d, 0x7d, 0xe2, 0x85, 0x94, 0x06, 0xad, 0x85, 0x13, + 0x7c, 0x9d, 0xdd, 0xd8, 0xdd, 0xf3, 0x09, 0xaa, 0xa6, 0x6b, 0xf9, 0xcc, 0xe0, 0x4e, 0xdc, 0x43, + 0x39, 0xc3, 0x4f, 0xf2, 0xeb, 0xec, 0xb2, 0x45, 0x25, 0xd1, 0xd4, 0x97, 0x2e, 0xf1, 0x0d, 0xb6, + 0x4e, 0xd0, 0x87, 0x96, 0x9f, 0xe2, 0x97, 0x18, 0x44, 0x5a, 0x39, 0x23, 0x22, 0x37, 0xb3, 0x2e, + 0xf3, 0x1b, 0xec, 0xca, 0xee, 0x9e, 0x4f, 0xd1, 0x5a, 0xd1, 0x44, 0x1f, 0x69, 0x25, 0x63, 0x17, + 0x6b, 0x25, 0x12, 0x58, 0x09, 0x58, 0xa4, 0x95, 0x75, 0x42, 0x39, 0x6f, 0x9d, 0x89, 0x55, 0xd3, + 0x3b, 0xed, 0x5b, 0xd8, 0x83, 0x55, 0x7e, 0x85, 0xf1, 0x99, 0x37, 0x83, 0x3b, 0x68, 0x50, 0x45, + 0x08, 0x6b, 0x9b, 0xff, 0xbe, 0xc2, 0xce, 0x98, 0xe1, 0x78, 0xf4, 0xfa, 0x78, 0x30, 0x2c, 0xae, + 0xbf, 0xc2, 0x4e, 0x0a, 0xd5, 0x2f, 0x6f, 0xdf, 0xee, 0xc0, 0x62, 0x30, 0xa4, 0xc3, 0xc3, 0x52, + 0x44, 0x39, 0xfc, 0x7b, 0xf8, 0xff, 0x64, 0x90, 0xbc, 0xdd, 0xf1, 0x0d, 0xa1, 0xda, 0xb0, 0xc4, + 0xcf, 0x31, 0xd6, 0xee, 0x78, 0xeb, 0x44, 0x3b, 0x56, 0x4d, 0x38, 0x45, 0x60, 0x57, 0xd8, 0x14, + 0x96, 0xf9, 0x59, 0xb6, 0xd6, 0xee, 0x78, 0x6d, 0x44, 0x94, 0x20, 0xac, 0x04, 0x27, 0xed, 0x8e, + 0x97, 0xc5, 0x99, 0xce, 0xb0, 0xd5, 0x76, 0xc7, 0x63, 0xa6, 0xa3, 0x16, 0xac, 0xf1, 0x8b, 0xec, + 0x7c, 0xbb, 0xe3, 0x9d, 0x6e, 0xa3, 0xda, 0x11, 0x91, 0xd3, 0xa6, 0x0f, 0x2c, 0x5c, 0x69, 0xb6, + 0xda, 0x77, 0xb4, 0x43, 0xef, 0x84, 0x69, 0xa2, 0xb3, 0x70, 0x9a, 0xdf, 0x66, 0xd7, 0xdf, 0x61, + 0xa2, 0xd9, 0x34, 0xd8, 0x14, 0xae, 0x64, 0x59, 0x38, 0x13, 0xa2, 0xf6, 0x0e, 0xde, 0x41, 0x94, + 0x68, 0x2c, 0x9c, 0x0d, 0x51, 0x79, 0x77, 0x58, 0x2f, 0x31, 0x09, 0xab, 0x62, 0xad, 0xe0, 0x1c, + 0xbf, 0xc6, 0x2e, 0x55, 0xa0, 0x8e, 0x48, 0x62, 0x29, 0x9c, 0x36, 0x70, 0x9e, 0x6e, 0x24, 0x72, + 0xd7, 0x02, 0x20, 0x0f, 0xe1, 0xc7, 0x34, 0x2e, 0xde, 0x3a, 0x6d, 0x10, 0x2e, 0x70, 0xce, 0xce, + 0x91, 0x2c, 0xde, 0xe6, 0x59, 0x96, 0xf4, 0x81, 0xf3, 0x0b, 0xec, 0xec, 0xd4, 0x26, 0x51, 0xe9, + 0x14, 0x2e, 0x86, 0xd0, 0x4e, 0x4d, 0x0d, 0x91, 0x08, 0x15, 0xa1, 0x85, 0x4b, 0xe4, 0xb7, 0x2a, + 0x00, 0x2d, 0xb8, 0xcc, 0x6f, 0xb1, 0x6b, 0x75, 0x28, 0x45, 0x27, 0xa4, 0x70, 0x02, 0xae, 0xbc, + 0x6f, 0xa1, 0x90, 0x69, 0xac, 0xe0, 0x2a, 0xbf, 0xc9, 0xae, 0xd6, 0xa1, 0xc8, 0x60, 0x71, 0xab, + 0x6b, 0x04, 0x92, 0x42, 0xd8, 0x8b, 0x5a, 0x42, 0x35, 0xd1, 0x1b, 0xe1, 0x10, 0xae, 0x87, 0x14, + 0xad, 0x29, 0x9f, 0xa1, 0x12, 0x89, 0xeb, 0xfb, 0x48, 0xe7, 0xca, 0xa1, 0x81, 0x1b, 0x74, 0x2c, + 0xe2, 0x64, 0x26, 0x8e, 0xd0, 0x5b, 0x25, 0x32, 0xdb, 0xd2, 0x0e, 0x6e, 0xf2, 0x3b, 0xec, 0xe6, + 0xf7, 0xe5, 0x8c, 0xb5, 0xf2, 0x99, 0xee, 0xa2, 0x81, 0x5b, 0x14, 0xdc, 0x29, 0xc1, 0x69, 0x27, + 0x12, 0xc2, 0x6e, 0xd3, 0xf6, 0xdf, 0x8b, 0x85, 0x0d, 0x29, 0x5f, 0xc8, 0x0e, 0xeb, 0xfc, 0x1e, + 0xbb, 0x53, 0xe1, 0xe4, 0xaa, 0x11, 0xaa, 0x61, 0x3e, 0xa8, 0x77, 0xf8, 0x03, 0x76, 0xef, 0x47, + 0x48, 0xc1, 0x3b, 0xdc, 0x25, 0x35, 0xa6, 0x44, 0x83, 0x15, 0x2f, 0x1f, 0xd5, 0xb6, 0xaa, 0x82, + 0x61, 0xb5, 0xb7, 0x26, 0x82, 0x8d, 0x1f, 0x23, 0x49, 0xeb, 0xe0, 0x1e, 0xff, 0x88, 0xdd, 
0xfe, + 0x10, 0x69, 0x2f, 0xc7, 0x1c, 0xe1, 0x7e, 0x68, 0x2c, 0xef, 0xbb, 0x3b, 0xe1, 0x3f, 0xa9, 0xe1, + 0xad, 0x38, 0x64, 0x5f, 0x1c, 0x89, 0xc4, 0xc7, 0x6a, 0x47, 0xc3, 0xc7, 0xb5, 0x3c, 0x9e, 0x5d, + 0x19, 0x1e, 0x7c, 0x58, 0xd5, 0x46, 0x9f, 0x94, 0xff, 0x29, 0xd5, 0xa1, 0x8c, 0x43, 0x07, 0x69, + 0xe4, 0xc5, 0xfd, 0x1f, 0x52, 0xa4, 0xab, 0xc6, 0x50, 0x52, 0x3e, 0xd3, 0x3a, 0x81, 0x4d, 0x7e, + 0x97, 0xdd, 0xaa, 0xa3, 0x99, 0xd1, 0x99, 0xb6, 0x68, 0x7c, 0x1b, 0xfb, 0xf0, 0x09, 0x45, 0x61, + 0x8e, 0xa1, 0x73, 0x17, 0x5a, 0x95, 0x2c, 0x65, 0xe8, 0x0a, 0x23, 0x2d, 0x3c, 0xe2, 0x9f, 0xb0, + 0x07, 0x75, 0x22, 0x29, 0xa4, 0x8d, 0xef, 0xc6, 0xae, 0x25, 0x8d, 0xe8, 0x96, 0x09, 0xf0, 0xe9, + 0x0f, 0x93, 0xad, 0x13, 0xc6, 0x05, 0xe7, 0x85, 0x2a, 0x5b, 0x7c, 0x93, 0x7d, 0x5c, 0x27, 0x87, + 0xa8, 0x54, 0xe4, 0x9b, 0x9e, 0x62, 0xfb, 0x7d, 0xc7, 0x0d, 0xdc, 0x28, 0x37, 0x06, 0x95, 0x9b, + 0x11, 0x3f, 0xe3, 0x0f, 0xd9, 0xfd, 0xf7, 0x11, 0x45, 0x14, 0xe5, 0xa9, 0x2f, 0x46, 0x8e, 0xb5, + 0x41, 0xc1, 0xc7, 0x54, 0x0d, 0x73, 0x4c, 0x9b, 0x08, 0xdb, 0xf2, 0xd8, 0x41, 0xe5, 0xe0, 0xc9, + 0x54, 0x62, 0xec, 0xf9, 0x59, 0xa3, 0x4e, 0xb4, 0x6a, 0x36, 0xb4, 0x6e, 0xc3, 0x53, 0x6a, 0x76, + 0x73, 0xa8, 0x6d, 0x69, 0xe3, 0x0a, 0xf8, 0x67, 0xd4, 0xec, 0x02, 0x6c, 0xd1, 0xb9, 0x04, 0xd3, + 0xe0, 0xf3, 0xe7, 0xa1, 0xeb, 0x93, 0x39, 0x13, 0xb1, 0xa1, 0x29, 0x03, 0xbf, 0xe0, 0xe7, 0xd9, + 0x69, 0xb2, 0xbb, 0xae, 0xc8, 0xe0, 0x97, 0x1c, 0xd8, 0x99, 0x29, 0x31, 0x94, 0x31, 0xfc, 0x8a, + 0xca, 0x61, 0xde, 0xa3, 0x47, 0xe5, 0x4c, 0x1f, 0x7e, 0x4d, 0x95, 0x1b, 0x40, 0x83, 0xcd, 0xd8, + 0x3a, 0x34, 0x28, 0x8b, 0x2d, 0xe0, 0xf3, 0x8a, 0x2b, 0x6d, 0x24, 0x1a, 0xf8, 0x0d, 0x75, 0xc0, + 0xe2, 0xec, 0xa1, 0xd7, 0x25, 0xf0, 0xdb, 0x69, 0xc6, 0x60, 0x2f, 0x48, 0x15, 0xfa, 0x89, 0x17, + 0x91, 0x8b, 0x3b, 0x58, 0xae, 0xb1, 0xf0, 0xbb, 0xca, 0x8d, 0x84, 0xb5, 0xe8, 0x7c, 0x12, 0x5b, + 0x07, 0xbf, 0xa7, 0xdc, 0x0e, 0x66, 0x85, 0x3d, 0x57, 0xd2, 0x7d, 0x2c, 0x41, 0x54, 0x14, 0x2a, + 0x90, 0xca, 0xa9, 0x63, 0x09, 0x0d, 0x7e, 0x95, 0x5d, 0x24, 0x38, 0x15, 0x2e, 0x6a, 0x79, 0x83, + 0x36, 0x4f, 0x1c, 0x44, 0x54, 0x4d, 0xb5, 0x8b, 0xce, 0xfc, 0xca, 0xca, 0x41, 0x4a, 0x63, 0xa1, + 0x38, 0x52, 0x0f, 0x17, 0x51, 0x84, 0xd6, 0x16, 0x21, 0xd1, 0x09, 0x34, 0xf9, 0x23, 0xf6, 0xb0, + 0x6e, 0x2d, 0x06, 0xa1, 0x97, 0x98, 0x85, 0x81, 0xaf, 0xa2, 0xbe, 0x4f, 0x45, 0x96, 0x85, 0x72, + 0x6c, 0x91, 0x54, 0x05, 0x1e, 0x69, 0x89, 0x10, 0x53, 0x12, 0x90, 0xa5, 0x36, 0xfc, 0x77, 0x49, + 0xf6, 0x79, 0xb4, 0x1c, 0x3d, 0x6d, 0x12, 0xa6, 0xc0, 0x2c, 0xee, 0xe5, 0x61, 0xbc, 0x17, 0xb5, + 0x97, 0x50, 0xc7, 0x99, 0x5f, 0x15, 0xb6, 0xa3, 0xd4, 0xef, 0x43, 0x4a, 0xc9, 0x39, 0x4f, 0x69, + 0xf4, 0x4b, 0x56, 0x2c, 0x41, 0x91, 0xb8, 0x05, 0x21, 0x8b, 0x95, 0x42, 0x49, 0x98, 0x0a, 0x93, + 0x5c, 0xd3, 0x16, 0xc5, 0x48, 0x6c, 0x26, 0xba, 0x51, 0x56, 0x40, 0x11, 0x56, 0x95, 0xa7, 0x0d, + 0x34, 0x90, 0xd1, 0xb0, 0x0f, 0x94, 0x67, 0xb0, 0x47, 0x09, 0xb8, 0x83, 0xd8, 0x34, 0x42, 0x39, + 0x30, 0x34, 0xc3, 0xa6, 0x06, 0x2f, 0x92, 0x44, 0x77, 0x43, 0xb2, 0x80, 0x25, 0x6e, 0x51, 0x2c, + 0x41, 0x36, 0x47, 0xc9, 0x33, 0x35, 0x94, 0x0d, 0x38, 0x6e, 0xaa, 0x59, 0xad, 0xe7, 0x54, 0x96, + 0x33, 0x46, 0x50, 0xd0, 0x67, 0x79, 0xa3, 0x8d, 0x7d, 0x6f, 0x30, 0x29, 0xbb, 0x6d, 0x10, 0xa7, + 0x43, 0x61, 0x2c, 0xd2, 0x02, 0x53, 0xca, 0xd8, 0x6e, 0x25, 0xe6, 0xc1, 0x4a, 0x59, 0xdb, 0xab, + 0x94, 0x53, 0x30, 0x4b, 0xcc, 0xb4, 0x8d, 0x1d, 0xf4, 0xa7, 0x2d, 0xb3, 0x52, 0x9c, 0xf0, 0xac, + 0x52, 0x40, 0xa1, 0x8c, 0x29, 0x79, 0x0a, 0x51, 0xe0, 0x0f, 0x95, 0x62, 0x2f, 0xaa, 0x78, 0x0e, + 0xfd, 0x63, 0xf5, 
0x7d, 0x20, 0xc3, 0x5b, 0xcd, 0xa0, 0x84, 0x3f, 0xf1, 0xfb, 0xec, 0x6e, 0xdd, + 0xea, 0x53, 0x2d, 0xf3, 0x04, 0xbd, 0xeb, 0x51, 0x28, 0x3c, 0x3d, 0xb0, 0xb0, 0x93, 0xc2, 0x9f, + 0xe9, 0x39, 0x82, 0x9d, 0x74, 0xfa, 0xcc, 0x80, 0x7d, 0xf2, 0x1d, 0x6c, 0xce, 0x08, 0x65, 0xe3, + 0xd0, 0x28, 0x0e, 0xe8, 0x3c, 0xc1, 0x3a, 0x0d, 0xdc, 0x3b, 0x74, 0x40, 0x57, 0x09, 0xe8, 0x74, + 0xbf, 0x19, 0xf8, 0x35, 0x25, 0x71, 0x00, 0x95, 0x0e, 0x5b, 0x0c, 0x2b, 0xdb, 0x1a, 0x8c, 0x30, + 0xce, 0x1c, 0xfc, 0x85, 0xde, 0x88, 0xc1, 0x66, 0x9f, 0x20, 0x3c, 0xaf, 0xfc, 0xc6, 0x27, 0x16, + 0xbe, 0xa9, 0x1c, 0xab, 0x4c, 0x4d, 0x61, 0x5b, 0xf0, 0x82, 0x42, 0x3f, 0xb5, 0xc2, 0x5f, 0xeb, + 0x34, 0x1b, 0x3f, 0x43, 0xf8, 0x96, 0x6a, 0xbb, 0x50, 0xa6, 0x8b, 0xf1, 0xec, 0xb2, 0x2f, 0x69, + 0x22, 0xcf, 0xe2, 0x48, 0xc1, 0xb1, 0xe1, 0x59, 0x9c, 0x19, 0x1d, 0x8a, 0x14, 0x0e, 0x6b, 0x24, + 0xa9, 0xbb, 0xca, 0x3a, 0x83, 0xa2, 0xc2, 0x87, 0xa3, 0x8d, 0xa5, 0xd5, 0x2f, 0xe0, 0x8b, 0x8d, + 0xa5, 0xd5, 0x2f, 0xe1, 0xcb, 0x8d, 0xa5, 0xd5, 0x1d, 0xd8, 0xd9, 0x58, 0x5a, 0xfd, 0x0a, 0xbe, + 0xda, 0x7c, 0xc4, 0x78, 0x77, 0x7f, 0x7c, 0x98, 0x0e, 0xc7, 0xe3, 0xfd, 0xe7, 0x43, 0xfb, 0xfa, + 0x60, 0x12, 0x1e, 0xd1, 0x6b, 0xec, 0xd4, 0x5e, 0x8e, 0x26, 0x3c, 0xa3, 0x4f, 0xb3, 0x15, 0xec, + 0x61, 0x94, 0x3b, 0x84, 0xc5, 0xc6, 0xee, 0x7f, 0xde, 0xac, 0x2f, 0x7e, 0xf7, 0x66, 0x7d, 0xf1, + 0x7f, 0x6f, 0xd6, 0x17, 0xff, 0xf5, 0x76, 0x7d, 0xe1, 0xbb, 0xb7, 0xeb, 0x0b, 0xff, 0x7d, 0xbb, + 0xbe, 0xf0, 0xec, 0xb3, 0xe7, 0x2f, 0x26, 0xdf, 0xbc, 0x3e, 0xd8, 0x1a, 0x8c, 0x0e, 0xb7, 0xe9, + 0x03, 0xa9, 0xfc, 0xf3, 0xe9, 0xf8, 0xeb, 0x6f, 0xb7, 0x83, 0xd3, 0xda, 0x17, 0xd3, 0xc1, 0x72, + 0xf1, 0xa1, 0xf4, 0xf4, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x26, 0x61, 0xa3, 0xab, 0x50, 0x0d, + 0x00, 0x00, } diff --git a/types/accesscontrol/resource.go b/types/accesscontrol/resource.go index a1c4c7190..5bbe30e61 100644 --- a/types/accesscontrol/resource.go +++ b/types/accesscontrol/resource.go @@ -22,6 +22,7 @@ var ResourceTree = map[ResourceType]TreeNode{ ResourceType_KV_FEEGRANT, ResourceType_KV_SLASHING, ResourceType_KV_BANK_DEFERRED, + ResourceType_KV_EVM, }}, ResourceType_Mem: {ResourceType_ANY, []ResourceType{ ResourceType_DexMem, @@ -31,12 +32,14 @@ var ResourceTree = map[ResourceType]TreeNode{ ResourceType_KV_BANK_SUPPLY, ResourceType_KV_BANK_DENOM, ResourceType_KV_BANK_BALANCES, + ResourceType_KV_BANK_WEI_BALANCE, }}, ResourceType_KV_BANK_SUPPLY: {ResourceType_KV_BANK, []ResourceType{}}, ResourceType_KV_BANK_DENOM: {ResourceType_KV_BANK, []ResourceType{}}, ResourceType_KV_BANK_BALANCES: {ResourceType_KV_BANK, []ResourceType{}}, ResourceType_KV_BANK_DEFERRED: {ResourceType_KV, []ResourceType{ResourceType_KV_BANK_DEFERRED_MODULE_TX_INDEX}}, ResourceType_KV_BANK_DEFERRED_MODULE_TX_INDEX: {ResourceType_KV_BANK_DEFERRED, []ResourceType{}}, + ResourceType_KV_BANK_WEI_BALANCE: {ResourceType_KV_BANK, []ResourceType{}}, ResourceType_KV_STAKING: {ResourceType_KV, []ResourceType{ ResourceType_KV_STAKING_DELEGATION, ResourceType_KV_STAKING_VALIDATOR, @@ -105,6 +108,8 @@ var ResourceTree = map[ResourceType]TreeNode{ ResourceType_KV_DEX_MEM_DEPOSIT, ResourceType_KV_DEX_LONG_ORDER_COUNT, ResourceType_KV_DEX_SHORT_ORDER_COUNT, + ResourceType_KV_DEX_MEM_CONTRACTS_TO_PROCESS, + ResourceType_KV_DEX_MEM_DOWNSTREAM_CONTRACTS, }}, ResourceType_KV_DEX_CONTRACT_LONGBOOK: {ResourceType_KV_DEX, []ResourceType{}}, ResourceType_KV_DEX_CONTRACT_SHORTBOOK: {ResourceType_KV_DEX, []ResourceType{}}, @@ -194,6 +199,32 @@ var ResourceTree = map[ResourceType]TreeNode{ 
ResourceType_KV_DEX_MEM_ORDER: {ResourceType_KV_DEX, []ResourceType{}}, ResourceType_KV_DEX_MEM_CANCEL: {ResourceType_KV_DEX, []ResourceType{}}, ResourceType_KV_DEX_MEM_DEPOSIT: {ResourceType_KV_DEX, []ResourceType{}}, + ResourceType_KV_DEX_MEM_CONTRACTS_TO_PROCESS: {ResourceType_KV_DEX, []ResourceType{}}, + ResourceType_KV_DEX_MEM_DOWNSTREAM_CONTRACTS: {ResourceType_KV_DEX, []ResourceType{}}, + ResourceType_KV_EVM: {ResourceType_KV, []ResourceType{ + ResourceType_KV_EVM_BALANCE, + ResourceType_KV_EVM_TRANSIENT, + ResourceType_KV_EVM_ACCOUNT_TRANSIENT, + ResourceType_KV_EVM_MODULE_TRANSIENT, + ResourceType_KV_EVM_NONCE, + ResourceType_KV_EVM_RECEIPT, + ResourceType_KV_EVM_S2E, + ResourceType_KV_EVM_E2S, + ResourceType_KV_EVM_CODE_HASH, + ResourceType_KV_EVM_CODE, + ResourceType_KV_EVM_CODE_SIZE, + }}, + ResourceType_KV_EVM_BALANCE: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_TRANSIENT: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_ACCOUNT_TRANSIENT: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_MODULE_TRANSIENT: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_NONCE: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_RECEIPT: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_S2E: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_E2S: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_CODE_HASH: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_CODE: {ResourceType_KV_EVM, []ResourceType{}}, + ResourceType_KV_EVM_CODE_SIZE: {ResourceType_KV_EVM, []ResourceType{}}, } // This returns a slice of all resource types that are dependent to a specific resource type diff --git a/types/accesscontrol/validation.go b/types/accesscontrol/validation.go index 40a525a92..ec83885c6 100644 --- a/types/accesscontrol/validation.go +++ b/types/accesscontrol/validation.go @@ -10,6 +10,7 @@ var ( ) type StoreKeyToResourceTypePrefixMap map[string]map[ResourceType][]byte +type ResourceTypeToStoreKeyMap map[ResourceType]string func DefaultStoreKeyToResourceTypePrefixMap() StoreKeyToResourceTypePrefixMap { return StoreKeyToResourceTypePrefixMap{ diff --git a/types/coin.go b/types/coin.go index 0c33ed757..f96996c13 100644 --- a/types/coin.go +++ b/types/coin.go @@ -120,6 +120,14 @@ func (coin Coin) Sub(coinB Coin) Coin { return res } +func (coin Coin) SubUnsafe(coinB Coin) Coin { + if coin.Denom != coinB.Denom { + panic(fmt.Sprintf("invalid coin denominations; %s, %s", coin.Denom, coinB.Denom)) + } + + return Coin{coin.Denom, coin.Amount.Sub(coinB.Amount)} +} + // SubAmount subtracts an amount from the Coin. func (coin Coin) SubAmount(amount Int) Coin { res := Coin{coin.Denom, coin.Amount.Sub(amount)} diff --git a/types/context.go b/types/context.go index ef847d3a3..0401af67c 100644 --- a/types/context.go +++ b/types/context.go @@ -24,27 +24,36 @@ but please do not over-use it. 
We try to keep all data structured and standard additions here would be better just to add to the Context struct */ type Context struct { - ctx context.Context - ms MultiStore - header tmproto.Header - headerHash tmbytes.HexBytes - chainID string - txBytes []byte - logger log.Logger - voteInfo []abci.VoteInfo - gasMeter GasMeter - blockGasMeter GasMeter - checkTx bool - recheckTx bool // if recheckTx == true, then checkTx must also be true - minGasPrice DecCoins - consParams *tmproto.ConsensusParams - eventManager *EventManager - priority int64 // The tx priority, only relevant in CheckTx + ctx context.Context + ms MultiStore + header tmproto.Header + headerHash tmbytes.HexBytes + chainID string + txBytes []byte + logger log.Logger + voteInfo []abci.VoteInfo + gasMeter GasMeter + occEnabled bool + checkTx bool + recheckTx bool // if recheckTx == true, then checkTx must also be true + minGasPrice DecCoins + consParams *tmproto.ConsensusParams + eventManager *EventManager + + priority int64 // The tx priority, only relevant in CheckTx + pendingTxChecker abci.PendingTxChecker // Checker for pending transaction, only relevant in CheckTx + checkTxCallback func(error) // callback to make at the end of CheckTx. Input param is the error (nil-able) of `runMsgs` + expireTxHandler func() // callback that the mempool invokes when a tx is expired txBlockingChannels acltypes.MessageAccessOpsChannelMapping txCompletionChannels acltypes.MessageAccessOpsChannelMapping txMsgAccessOps map[int][]acltypes.AccessOperation + // EVM properties + evm bool // EVM transaction flag + evmNonce uint64 // EVM Transaction nonce + evmSenderAddress string // EVM Sender address + msgValidator *acltypes.MsgValidator messageIndex int // Used to track current message being processed txIndex int @@ -92,10 +101,6 @@ func (c Context) GasMeter() GasMeter { return c.gasMeter } -func (c Context) BlockGasMeter() GasMeter { - return c.blockGasMeter -} - func (c Context) IsCheckTx() bool { return c.checkTx } @@ -104,6 +109,10 @@ func (c Context) IsReCheckTx() bool { return c.recheckTx } +func (c Context) IsOCCEnabled() bool { + return c.occEnabled +} + func (c Context) MinGasPrices() DecCoins { return c.minGasPrice } @@ -116,6 +125,30 @@ func (c Context) Priority() int64 { return c.priority } +func (c Context) ExpireTxHandler() abci.ExpireTxHandler { + return c.expireTxHandler +} + +func (c Context) EVMSenderAddress() string { + return c.evmSenderAddress +} + +func (c Context) EVMNonce() uint64 { + return c.evmNonce +} + +func (c Context) IsEVM() bool { + return c.evm +} + +func (c Context) PendingTxChecker() abci.PendingTxChecker { + return c.pendingTxChecker +} + +func (c Context) CheckTxCallback() func(error) { + return c.checkTxCallback +} + func (c Context) TxCompletionChannels() acltypes.MessageAccessOpsChannelMapping { return c.txCompletionChannels } @@ -269,18 +302,18 @@ func (c Context) WithGasMeter(meter GasMeter) Context { return c } -// WithBlockGasMeter returns a Context with an updated block GasMeter -func (c Context) WithBlockGasMeter(meter GasMeter) Context { - c.blockGasMeter = meter - return c -} - // WithIsCheckTx enables or disables CheckTx value for verifying transactions and returns an updated Context func (c Context) WithIsCheckTx(isCheckTx bool) Context { c.checkTx = isCheckTx return c } +// WithIsOCCEnabled enables or disables whether OCC is used as the concurrency algorithm +func (c Context) WithIsOCCEnabled(isOCCEnabled bool) Context { + c.occEnabled = isOCCEnabled + return c +} + // WithIsRecheckTx called 
with true will also set true on checkTx in order to // enforce the invariant that if recheckTx = true then checkTx = true as well. func (c Context) WithIsReCheckTx(isRecheckTx bool) Context { @@ -349,6 +382,36 @@ func (c Context) WithTraceSpanContext(ctx context.Context) Context { return c } +func (c Context) WithEVMSenderAddress(address string) Context { + c.evmSenderAddress = address + return c +} + +func (c Context) WithEVMNonce(nonce uint64) Context { + c.evmNonce = nonce + return c +} + +func (c Context) WithIsEVM(isEVM bool) Context { + c.evm = isEVM + return c +} + +func (c Context) WithPendingTxChecker(checker abci.PendingTxChecker) Context { + c.pendingTxChecker = checker + return c +} + +func (c Context) WithCheckTxCallback(checkTxCallback func(error)) Context { + c.checkTxCallback = checkTxCallback + return c +} + +func (c Context) WithExpireTxHandler(expireTxHandler func()) Context { + c.expireTxHandler = expireTxHandler + return c +} + // TODO: remove??? func (c Context) IsZero() bool { return c.ms == nil diff --git a/types/context_test.go b/types/context_test.go index 92f5dccaf..12d71b67b 100644 --- a/types/context_test.go +++ b/types/context_test.go @@ -87,11 +87,11 @@ func (s *contextTestSuite) TestContextWithCustom() { height := int64(1) chainid := "chainid" ischeck := true + isOCC := true txbytes := []byte("txbytes") logger := mocks.NewMockLogger(ctrl) voteinfos := []abci.VoteInfo{{}} meter := types.NewGasMeter(10000) - blockGasMeter := types.NewGasMeter(20000) minGasPrices := types.DecCoins{types.NewInt64DecCoin("feetoken", 1)} headerHash := []byte("headerHash") @@ -105,17 +105,18 @@ func (s *contextTestSuite) TestContextWithCustom() { WithVoteInfos(voteinfos). WithGasMeter(meter). WithMinGasPrices(minGasPrices). - WithBlockGasMeter(blockGasMeter). - WithHeaderHash(headerHash) + WithHeaderHash(headerHash). 
+ WithIsOCCEnabled(isOCC) + s.Require().Equal(height, ctx.BlockHeight()) s.Require().Equal(chainid, ctx.ChainID()) s.Require().Equal(ischeck, ctx.IsCheckTx()) + s.Require().Equal(isOCC, ctx.IsOCCEnabled()) s.Require().Equal(txbytes, ctx.TxBytes()) s.Require().Equal(logger, ctx.Logger()) s.Require().Equal(voteinfos, ctx.VoteInfos()) s.Require().Equal(meter, ctx.GasMeter()) s.Require().Equal(minGasPrices, ctx.MinGasPrices()) - s.Require().Equal(blockGasMeter, ctx.BlockGasMeter()) s.Require().Equal(headerHash, ctx.HeaderHash().Bytes()) s.Require().False(ctx.WithIsCheckTx(false).IsCheckTx()) diff --git a/types/errors/errors.go b/types/errors/errors.go index 163a02199..b69a5f0df 100644 --- a/types/errors/errors.go +++ b/types/errors/errors.go @@ -147,6 +147,9 @@ var ( // ErrInvalidConcurrency defines an error occurred during concurrent execution ErrInvalidConcurrencyExecution = Register(RootCodespace, 41, "error during concurrent execution") + // ErrAlreadyExists defines the error returned when a tx fails CheckTx because the node has already seen it + ErrAlreadyExists = Register(RootCodespace, 42, "error tx already exists") + // ErrPanic is only set when we recover from a panic, so we know to // redact potentially sensitive system info ErrPanic = Register(UndefinedCodespace, 111222, "panic") diff --git a/types/occ/types.go b/types/occ/types.go new file mode 100644 index 000000000..de321b7cb --- /dev/null +++ b/types/occ/types.go @@ -0,0 +1,22 @@ +package occ + +import ( + "errors" +) + +var ( + ErrReadEstimate = errors.New("multiversion store value contains estimate, cannot read, aborting") +) + +// Abort contains the information for a transaction's conflict +type Abort struct { + DependentTxIdx int + Err error +} + +func NewEstimateAbort(dependentTxIdx int) Abort { + return Abort{ + DependentTxIdx: dependentTxIdx, + Err: ErrReadEstimate, + } +} diff --git a/types/tx_batch.go b/types/tx_batch.go new file mode 100644 index 000000000..3a835715e --- /dev/null +++ b/types/tx_batch.go @@ -0,0 +1,37 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/store/multiversion" + abci "github.com/tendermint/tendermint/abci/types" +) + +// DeliverTxEntry represents an individual transaction's request within a batch. +// This can be extended to include tx-level tracing or metadata +type DeliverTxEntry struct { + Request abci.RequestDeliverTx + SdkTx Tx + Checksum [32]byte + AbsoluteIndex int + EstimatedWritesets MappedWritesets +} + +// MappedWritesets holds a transaction's estimated writesets, keyed by store key. +type MappedWritesets map[StoreKey]multiversion.WriteSet + +// DeliverTxBatchRequest represents a request object for a batch of transactions. +// This can be extended to include request-level tracing or metadata +type DeliverTxBatchRequest struct { + TxEntries []*DeliverTxEntry +} + +// DeliverTxResult represents an individual transaction's response within a batch. +// This can be extended to include tx-level tracing or metadata +type DeliverTxResult struct { + Response abci.ResponseDeliverTx +} + +// DeliverTxBatchResponse represents a response object for a batch of transactions.
+// This can be extended to include response-level tracing or metadata +type DeliverTxBatchResponse struct { + Results []*DeliverTxResult +} diff --git a/x/accesscontrol/keeper/keeper.go b/x/accesscontrol/keeper/keeper.go index 2a2719f7e..13fc5ae8f 100644 --- a/x/accesscontrol/keeper/keeper.go +++ b/x/accesscontrol/keeper/keeper.go @@ -12,6 +12,7 @@ import ( "github.com/yourbasic/graph" "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/multiversion" "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" acltypes "github.com/cosmos/cosmos-sdk/types/accesscontrol" @@ -40,6 +41,7 @@ type ( MessageDependencyGeneratorMapper DependencyGeneratorMap AccountKeeper authkeeper.AccountKeeper StakingKeeper stakingkeeper.Keeper + ResourceTypeStoreKeyMapping acltypes.ResourceTypeToStoreKeyMap } ) @@ -493,14 +495,71 @@ func (k Keeper) IterateWasmDependencies(ctx sdk.Context, handler func(wasmDepend } } -func (k Keeper) BuildDependencyDag(ctx sdk.Context, txDecoder sdk.TxDecoder, anteDepGen sdk.AnteDepGenerator, txs [][]byte) (*types.Dag, error) { +type storeKeyMap map[string]sdk.StoreKey + +func (k Keeper) GetStoreKeyMap(ctx sdk.Context) storeKeyMap { + storeKeyMap := make(storeKeyMap) + for _, storeKey := range ctx.MultiStore().StoreKeys() { + storeKeyMap[storeKey.Name()] = storeKey + } + return storeKeyMap +} + +func (k Keeper) UpdateWritesetsWithAccessOps(accessOps []acltypes.AccessOperation, mappedWritesets sdk.MappedWritesets, storeKeyMap storeKeyMap) sdk.MappedWritesets { + for _, accessOp := range accessOps { + // we only want writes and unknowns (assumed writes) + if accessOp.AccessType != acltypes.AccessType_WRITE && accessOp.AccessType != acltypes.AccessType_UNKNOWN { + continue + } + // the accessOps should only have SPECIFIC identifiers (we don't want wildcards) + if accessOp.IdentifierTemplate == "*" { + continue + } + // check the resource type to store key map for potential store key + if storeKeyStr, ok := k.ResourceTypeStoreKeyMapping[accessOp.ResourceType]; ok { + // check that we have a storekey corresponding to that string + if storeKey, ok2 := storeKeyMap[storeKeyStr]; ok2 { + // if we have a StoreKey, add it to the writeset - writing empty bytes is ok because it will be saved as EstimatedWriteset + if _, ok := mappedWritesets[storeKey]; !ok { + mappedWritesets[storeKey] = make(multiversion.WriteSet) + } + mappedWritesets[storeKey][accessOp.IdentifierTemplate] = []byte{} + } + } + + } + return mappedWritesets +} + +// GenerateEstimatedWritesets utilizes the existing patterns for access operation generation to estimate the writesets for a transaction +func (k Keeper) GenerateEstimatedWritesets(ctx sdk.Context, anteDepGen sdk.AnteDepGenerator, txIndex int, tx sdk.Tx) (sdk.MappedWritesets, error) { + storeKeyMap := k.GetStoreKeyMap(ctx) + writesets := make(sdk.MappedWritesets) + // generate antedeps accessOps for tx + anteDeps, err := anteDepGen([]acltypes.AccessOperation{}, tx, txIndex) + if err != nil { + return nil, err + } + writesets = k.UpdateWritesetsWithAccessOps(anteDeps, writesets, storeKeyMap) + + // generate accessOps for each message + msgs := tx.GetMsgs() + for _, msg := range msgs { + msgDependencies := k.GetMessageDependencies(ctx, msg) + // update estimated writeset for each message deps + writesets = k.UpdateWritesetsWithAccessOps(msgDependencies, writesets, storeKeyMap) + } + return writesets, nil +} + +func (k Keeper) BuildDependencyDag(ctx sdk.Context, anteDepGen sdk.AnteDepGenerator, txs []sdk.Tx) (*types.Dag, 
error) { defer MeasureBuildDagDuration(time.Now(), "BuildDependencyDag") // contains the latest msg index for a specific Access Operation dependencyDag := types.NewDag() - for txIndex, txBytes := range txs { - tx, err := txDecoder(txBytes) // TODO: results in repetitive decoding for txs with runtx decode (potential optimization) - if err != nil { - return nil, err + for txIndex, tx := range txs { + if tx == nil { + // this implies decoding error + return nil, sdkerrors.ErrTxDecode } // get the ante dependencies and add them to the dag anteDeps, err := anteDepGen([]acltypes.AccessOperation{}, tx, txIndex) @@ -525,6 +584,7 @@ func (k Keeper) BuildDependencyDag(ctx sdk.Context, txDecoder sdk.TxDecoder, ant // add Access ops for msg for anteMsg dependencyDag.AddAccessOpsForMsg(acltypes.ANTE_MSG_INDEX, txIndex, anteAccessOpsList) + ctx = ctx.WithTxIndex(txIndex) msgs := tx.GetMsgs() for messageIndex, msg := range msgs { if types.IsGovMessage(msg) { diff --git a/x/accesscontrol/keeper/keeper_test.go b/x/accesscontrol/keeper/keeper_test.go index 52e714f5c..a18f86436 100644 --- a/x/accesscontrol/keeper/keeper_test.go +++ b/x/accesscontrol/keeper/keeper_test.go @@ -20,6 +20,7 @@ import ( aclkeeper "github.com/cosmos/cosmos-sdk/x/accesscontrol/keeper" acltestutil "github.com/cosmos/cosmos-sdk/x/accesscontrol/testutil" "github.com/cosmos/cosmos-sdk/x/accesscontrol/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -1999,13 +2000,11 @@ func TestBuildDependencyDag(t *testing.T) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs...) require.NoError(t, err) - bz, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) - txs := [][]byte{ - bz, + txs := []sdk.Tx{ + txBuilder.GetTx(), } // ensure no errors creating dag - _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), app.GetAnteDepGenerator(), txs) + _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, app.GetAnteDepGenerator(), txs) require.NoError(t, err) } @@ -2022,13 +2021,11 @@ func TestBuildDependencyDagWithGovMessage(t *testing.T) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs...) require.NoError(t, err) - bz, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) - txs := [][]byte{ - bz, + txs := []sdk.Tx{ + txBuilder.GetTx(), } // ensure no errors creating dag - _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), app.GetAnteDepGenerator(), txs) + _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, app.GetAnteDepGenerator(), txs) require.ErrorIs(t, err, types.ErrGovMsgInBlock) } @@ -2048,13 +2045,11 @@ func TestBuildDependencyDag_GovPropMessage(t *testing.T) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs...) 
require.NoError(t, err) - bz, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) - txs := [][]byte{ - bz, + txs := []sdk.Tx{ + txBuilder.GetTx(), } // expect ErrGovMsgInBlock - _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), app.GetAnteDepGenerator(), txs) + _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, app.GetAnteDepGenerator(), txs) require.EqualError(t, err, types.ErrGovMsgInBlock.Error()) } @@ -2072,13 +2067,11 @@ func TestBuildDependencyDag_GovDepositMessage(t *testing.T) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs...) require.NoError(t, err) - bz, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) - txs := [][]byte{ - bz, + txs := []sdk.Tx{ + txBuilder.GetTx(), } // expect ErrGovMsgInBlock - _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), app.GetAnteDepGenerator(), txs) + _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, app.GetAnteDepGenerator(), txs) require.EqualError(t, err, types.ErrGovMsgInBlock.Error()) } @@ -2099,49 +2092,28 @@ func TestBuildDependencyDag_MultipleTransactions(t *testing.T) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs1...) require.NoError(t, err) - bz1, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) + tx1 := txBuilder.GetTx() err = txBuilder.SetMsgs(msgs2...) require.NoError(t, err) - bz2, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) + tx2 := txBuilder.GetTx() - txs := [][]byte{ - bz1, - bz2, + txs := []sdk.Tx{ + tx1, + tx2, } - _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), app.GetAnteDepGenerator(), txs) + _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, app.GetAnteDepGenerator(), txs) require.NoError(t, err) mockAnteDepGenerator := func(_ []acltypes.AccessOperation, _ sdk.Tx, _ int) ([]acltypes.AccessOperation, error) { return nil, errors.New("Mocked error") } - _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), mockAnteDepGenerator, txs) + _, err = app.AccessControlKeeper.BuildDependencyDag(ctx, mockAnteDepGenerator, txs) require.ErrorContains(t, err, "Mocked error") } -func TestBuildDependencyDag_DecoderError(t *testing.T) { - // Set up a mocked app with a failing decoder - app := simapp.Setup(false) - ctx := app.BaseApp.NewContext(false, tmproto.Header{}) - - // Encode an invalid transaction - txs := [][]byte{ - []byte("invalid tx"), - } - - _, err := app.AccessControlKeeper.BuildDependencyDag( - ctx, - simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), - app.GetAnteDepGenerator(), - txs, - ) - require.Error(t, err) -} - -func BencharkAccessOpsBuildDependencyDag(b *testing.B) { +func BenchmarkAccessOpsBuildDependencyDag(b *testing.B) { app := simapp.Setup(false) ctx := app.BaseApp.NewContext(false, tmproto.Header{}) @@ -2157,30 +2129,30 @@ func BencharkAccessOpsBuildDependencyDag(b *testing.B) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() _ = txBuilder.SetMsgs(msgs1...) - bz1, _ := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) + tx1 := txBuilder.GetTx() _ = txBuilder.SetMsgs(msgs2...) 
- bz2, _ := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - - txs := [][]byte{ - bz1, - bz1, - bz1, - bz1, - bz1, - bz1, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, - bz2, + tx2 := txBuilder.GetTx() + + txs := []sdk.Tx{ + tx1, + tx1, + tx1, + tx1, + tx1, + tx1, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, + tx2, } mockAnteDepGenerator := func(_ []acltypes.AccessOperation, _ sdk.Tx, _ int) ([]acltypes.AccessOperation, error) { @@ -2236,7 +2208,7 @@ func BencharkAccessOpsBuildDependencyDag(b *testing.B) { for i := 0; i < b.N; i++ { _, _ = app.AccessControlKeeper.BuildDependencyDag( - ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), mockAnteDepGenerator, txs) + ctx, mockAnteDepGenerator, txs) } } @@ -2257,21 +2229,19 @@ func TestInvalidAccessOpsBuildDependencyDag(t *testing.T) { txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs1...) require.NoError(t, err) - bz1, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) + tx1 := txBuilder.GetTx() err = txBuilder.SetMsgs(msgs2...) require.NoError(t, err) - bz2, err := simapp.MakeTestEncodingConfig().TxConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) + tx2 := txBuilder.GetTx() - txs := [][]byte{ - bz1, - bz2, - bz2, - bz2, - bz2, - bz2, + txs := []sdk.Tx{ + tx1, + tx2, + tx2, + tx2, + tx2, + tx2, } mockAnteDepGenerator := func(_ []acltypes.AccessOperation, _ sdk.Tx, _ int) ([]acltypes.AccessOperation, error) { @@ -2286,7 +2256,7 @@ func TestInvalidAccessOpsBuildDependencyDag(t *testing.T) { // ensure no errors creating dag _, err = app.AccessControlKeeper.BuildDependencyDag( - ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), mockAnteDepGenerator, txs) + ctx, mockAnteDepGenerator, txs) require.Error(t, err) mockAnteDepGenerator = func(_ []acltypes.AccessOperation, _ sdk.Tx, _ int) ([]acltypes.AccessOperation, error) { @@ -2302,7 +2272,7 @@ func TestInvalidAccessOpsBuildDependencyDag(t *testing.T) { // ensure no errors creating dag _, err = app.AccessControlKeeper.BuildDependencyDag( - ctx, simapp.MakeTestEncodingConfig().TxConfig.TxDecoder(), mockAnteDepGenerator, txs) + ctx, mockAnteDepGenerator, txs) require.NoError(t, err) } @@ -2670,6 +2640,40 @@ func (suite *KeeperTestSuite) TestBuildSelectorOps_AccessOperationSelectorType_C req.NoError(err) } +func TestGenerateEstimatedDependencies(t *testing.T) { + app := simapp.Setup(false) + ctx := app.BaseApp.NewContext(false, tmproto.Header{}) + + accounts := simapp.AddTestAddrsIncremental(app, ctx, 2, sdk.NewInt(30000000)) + // setup test txs + msgs := []sdk.Msg{ + banktypes.NewMsgSend(accounts[0], accounts[1], sdk.NewCoins(sdk.NewCoin("usei", sdk.NewInt(1)))), + } + // set up testing mapping + app.AccessControlKeeper.ResourceTypeStoreKeyMapping = map[acltypes.ResourceType]string{ + acltypes.ResourceType_KV_BANK_BALANCES: banktypes.StoreKey, + acltypes.ResourceType_KV_AUTH_ADDRESS_STORE: authtypes.StoreKey, + } + + storeKeyMap := app.AccessControlKeeper.GetStoreKeyMap(ctx) + + txBuilder := simapp.MakeTestEncodingConfig().TxConfig.NewTxBuilder() + err := txBuilder.SetMsgs(msgs...) 
+ require.NoError(t, err) + + writesets, err := app.AccessControlKeeper.GenerateEstimatedWritesets(ctx, app.GetAnteDepGenerator(), 0, txBuilder.GetTx()) + require.NoError(t, err) + + // check writesets + require.Equal(t, 2, len(writesets)) + bankWritesets := writesets[storeKeyMap[banktypes.StoreKey]] + require.Equal(t, 3, len(bankWritesets)) + + authWritesets := writesets[storeKeyMap[authtypes.StoreKey]] + require.Equal(t, 1, len(authWritesets)) + +} + func TestKeeperTestSuite(t *testing.T) { t.Parallel() suite.Run(t, new(KeeperTestSuite)) diff --git a/x/accesscontrol/keeper/options.go b/x/accesscontrol/keeper/options.go index 365280ab3..6dd7f3b36 100644 --- a/x/accesscontrol/keeper/options.go +++ b/x/accesscontrol/keeper/options.go @@ -1,5 +1,7 @@ package keeper +import acltypes "github.com/cosmos/cosmos-sdk/types/accesscontrol" + type optsFn func(*Keeper) func (f optsFn) Apply(keeper *Keeper) { @@ -25,3 +27,9 @@ func (oldGenerator DependencyGeneratorMap) Merge(newGenerator DependencyGenerato } return oldGenerator } + +func WithResourceTypeToStoreKeyMap(resourceTypeStoreKeyMapping acltypes.ResourceTypeToStoreKeyMap) optsFn { + return optsFn(func(k *Keeper) { + k.ResourceTypeStoreKeyMapping = resourceTypeStoreKeyMapping + }) +} diff --git a/x/accesscontrol/testutil/accesscontrol.go b/x/accesscontrol/testutil/accesscontrol.go index 29e25e867..2be4ecc95 100644 --- a/x/accesscontrol/testutil/accesscontrol.go +++ b/x/accesscontrol/testutil/accesscontrol.go @@ -23,10 +23,11 @@ var TestingStoreKeyToResourceTypePrefixMap = acltypes.StoreKeyToResourceTypePref acltypes.ResourceType_Mem: acltypes.EmptyPrefix, }, banktypes.StoreKey: { - acltypes.ResourceType_KV_BANK: acltypes.EmptyPrefix, - acltypes.ResourceType_KV_BANK_BALANCES: banktypes.BalancesPrefix, - acltypes.ResourceType_KV_BANK_SUPPLY: banktypes.SupplyKey, - acltypes.ResourceType_KV_BANK_DENOM: banktypes.DenomMetadataPrefix, + acltypes.ResourceType_KV_BANK: acltypes.EmptyPrefix, + acltypes.ResourceType_KV_BANK_BALANCES: banktypes.BalancesPrefix, + acltypes.ResourceType_KV_BANK_SUPPLY: banktypes.SupplyKey, + acltypes.ResourceType_KV_BANK_DENOM: banktypes.DenomMetadataPrefix, + acltypes.ResourceType_KV_BANK_WEI_BALANCE: banktypes.WeiBalancesPrefix, }, banktypes.DeferredCacheStoreKey: { acltypes.ResourceType_KV_BANK_DEFERRED: acltypes.EmptyPrefix, diff --git a/x/bank/keeper/keeper.go b/x/bank/keeper/keeper.go index cdcf20d6c..e74d868a1 100644 --- a/x/bank/keeper/keeper.go +++ b/x/bank/keeper/keeper.go @@ -51,6 +51,8 @@ type Keeper interface { DelegateCoins(ctx sdk.Context, delegatorAddr, moduleAccAddr sdk.AccAddress, amt sdk.Coins) error UndelegateCoins(ctx sdk.Context, moduleAccAddr, delegatorAddr sdk.AccAddress, amt sdk.Coins) error + GetStoreKey() sdk.StoreKey + types.QueryServer } @@ -198,7 +200,7 @@ func (k BaseKeeper) DelegateCoins(ctx sdk.Context, delegatorAddr, moduleAccAddr } balances = balances.Add(balance) - err := k.setBalance(ctx, delegatorAddr, balance.Sub(coin)) + err := k.setBalance(ctx, delegatorAddr, balance.Sub(coin), true) if err != nil { return err } @@ -212,7 +214,7 @@ func (k BaseKeeper) DelegateCoins(ctx sdk.Context, delegatorAddr, moduleAccAddr types.NewCoinSpentEvent(delegatorAddr, amt), ) - err := k.addCoins(ctx, moduleAccAddr, amt) + err := k.addCoins(ctx, moduleAccAddr, amt, true) if err != nil { return err } @@ -235,7 +237,7 @@ func (k BaseKeeper) UndelegateCoins(ctx sdk.Context, moduleAccAddr, delegatorAdd return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, amt.String()) } - err := k.subUnlockedCoins(ctx, 
moduleAccAddr, amt) + err := k.subUnlockedCoins(ctx, moduleAccAddr, amt, true) if err != nil { return err } @@ -244,7 +246,7 @@ func (k BaseKeeper) UndelegateCoins(ctx sdk.Context, moduleAccAddr, delegatorAdd return sdkerrors.Wrap(err, "failed to track undelegation") } - err = k.addCoins(ctx, delegatorAddr, amt) + err = k.addCoins(ctx, delegatorAddr, amt, true) if err != nil { return err } @@ -408,7 +410,7 @@ func (k BaseKeeper) DeferredSendCoinsFromAccountToModule( panic("bank keeper created without deferred cache") } // Deducts Fees from the Sender Account - err := k.subUnlockedCoins(ctx, senderAddr, amount) + err := k.subUnlockedCoins(ctx, senderAddr, amount, true) if err != nil { return err } @@ -466,7 +468,7 @@ func (k BaseKeeper) WriteDeferredBalances(ctx sdk.Context) []abci.Event { ctx.Logger().Error(err.Error()) panic(err) } - err := k.addCoins(ctx, sdk.MustAccAddressFromBech32(moduleBech32Addr), amount) + err := k.addCoins(ctx, sdk.MustAccAddressFromBech32(moduleBech32Addr), amount, true) if err != nil { ctx.Logger().Error(fmt.Sprintf("Failed to add coin=%s to module address=%s, error is: %s", amount, moduleBech32Addr, err)) panic(err) @@ -568,7 +570,7 @@ func (k BaseKeeper) MintCoins(ctx sdk.Context, moduleName string, amounts sdk.Co if acc == nil { return errors.New(fmt.Sprintf("module account for %s not found", moduleName)) } - return k.addCoins(ctx, acc.GetAddress(), amounts) + return k.addCoins(ctx, acc.GetAddress(), amounts, true) } err := k.createCoins(ctx, moduleName, amounts, addFn) @@ -614,7 +616,7 @@ func (k BaseKeeper) destroyCoins(ctx sdk.Context, moduleName string, amounts sdk func (k BaseKeeper) BurnCoins(ctx sdk.Context, moduleName string, amounts sdk.Coins) error { subFn := func(ctx sdk.Context, moduleName string, amounts sdk.Coins) error { acc := k.ak.GetModuleAccount(ctx, moduleName) - return k.subUnlockedCoins(ctx, acc.GetAddress(), amounts) + return k.subUnlockedCoins(ctx, acc.GetAddress(), amounts, true) } err := k.destroyCoins(ctx, moduleName, amounts, subFn) @@ -677,6 +679,10 @@ func (k BaseKeeper) trackUndelegation(ctx sdk.Context, addr sdk.AccAddress, amt return nil } +func (k BaseKeeper) GetStoreKey() sdk.StoreKey { + return k.storeKey +} + // IterateTotalSupply iterates over the total supply calling the given cb (callback) function // with the balance of each coin. // The iteration stops if the callback returns true. 
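
The bank changes in this diff introduce a two-tier balance for EVM support: an account's total value is balance * 10^12 + weiBalance, with the wei component kept in [0, 10^12) and a one-usei escrow account absorbing underflow and overflow. Below is a minimal, self-contained sketch of that borrow/carry arithmetic; the account struct, sendWei helper, and plain int64 fields are illustrative stand-ins for the real sdk.Int balances and KVStore state, not part of this diff.

package main

import "fmt"

const maxWei = int64(1_000_000_000_000) // mirrors MaxWeiBalance: 10^12 wei per usei

// account is a hypothetical stand-in for on-chain state; wei stays in [0, maxWei).
type account struct{ usei, wei int64 }

// sendWei moves wei from one account to another, borrowing one usei into
// escrow when the sender's wei underflows and redeeming one when the
// recipient's wei overflows, which is the same normalization that
// SendCoinsAndWei (added to x/bank/keeper/send.go further down) performs.
func sendWei(from, to, escrow *account, wei int64) {
	from.wei -= wei
	if from.wei < 0 {
		from.wei += maxWei
		from.usei-- // sender pays one usei into escrow
		escrow.usei++
	}
	to.wei += wei
	if to.wei >= maxWei {
		to.wei -= maxWei
		to.usei++ // recipient redeems one usei from escrow
		escrow.usei--
	}
}

func main() {
	from, to, escrow := &account{usei: 50}, &account{}, &account{}
	sendWei(from, to, escrow, 1)
	fmt.Println(*from, *to, *escrow) // {49 999999999999} {0 1} {1 0}
}

Across from, to, and escrow the combined usei * 10^12 + wei total is conserved; that is the invariant the TestSendCoinsAndWei cases below check step by step.
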
diff --git a/x/bank/keeper/keeper_test.go b/x/bank/keeper/keeper_test.go index e2eec160a..25600f3f7 100644 --- a/x/bank/keeper/keeper_test.go +++ b/x/bank/keeper/keeper_test.go @@ -76,6 +76,7 @@ func (suite *IntegrationTestSuite) initKeepersWithmAccPerms(blockedAddrs map[str appCodec := simapp.MakeTestEncodingConfig().Marshaler maccPerms[holder] = nil + maccPerms[types.WeiEscrowName] = nil maccPerms[authtypes.Burner] = []string{authtypes.Burner} maccPerms[authtypes.Minter] = []string{authtypes.Minter} maccPerms[multiPerm] = []string{authtypes.Burner, authtypes.Minter, authtypes.Staking} @@ -108,6 +109,55 @@ func (suite *IntegrationTestSuite) SetupTest() { suite.queryClient = queryClient } +func (suite *IntegrationTestSuite) TestSendCoinsAndWei() { + ctx := suite.ctx + require := suite.Require() + authKeeper, keeper := suite.initKeepersWithmAccPerms(make(map[string]bool)) + amt := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))) + require.NoError(keeper.MintCoins(ctx, authtypes.Minter, amt)) + addr1 := sdk.AccAddress([]byte("addr1_______________")) + addr2 := sdk.AccAddress([]byte("addr2_______________")) + addr3 := sdk.AccAddress([]byte("addr3_______________")) + require.NoError(keeper.SendCoinsFromModuleToAccount(ctx, authtypes.Minter, addr1, amt)) + // should no-op if sending zero + require.NoError(keeper.SendCoinsAndWei(ctx, addr1, addr2, nil, sdk.DefaultBondDenom, sdk.ZeroInt(), sdk.ZeroInt())) + require.Equal(sdk.ZeroInt(), keeper.GetWeiBalance(ctx, addr1)) + require.Equal(sdk.ZeroInt(), keeper.GetWeiBalance(ctx, addr2)) + require.Equal(sdk.NewInt(100), keeper.GetBalance(ctx, addr1, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.ZeroInt(), keeper.GetBalance(ctx, addr2, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.ZeroInt(), keeper.GetBalance(ctx, authKeeper.GetModuleAddress(types.WeiEscrowName), sdk.DefaultBondDenom).Amount) + // should just do usei send if wei is zero + require.NoError(keeper.SendCoinsAndWei(ctx, addr1, addr3, nil, sdk.DefaultBondDenom, sdk.NewInt(50), sdk.ZeroInt())) + require.Equal(sdk.ZeroInt(), keeper.GetWeiBalance(ctx, addr1)) + require.Equal(sdk.ZeroInt(), keeper.GetWeiBalance(ctx, addr3)) + require.Equal(sdk.NewInt(50), keeper.GetBalance(ctx, addr1, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.NewInt(50), keeper.GetBalance(ctx, addr3, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.ZeroInt(), keeper.GetBalance(ctx, authKeeper.GetModuleAddress(types.WeiEscrowName), sdk.DefaultBondDenom).Amount) + // should return error if wei amount overflows + require.Error(keeper.SendCoinsAndWei(ctx, addr1, addr2, nil, sdk.DefaultBondDenom, sdk.ZeroInt(), sdk.NewInt(1_000_000_000_000))) + // sender gets escrowed one usei, recipient does not get redeemed + require.NoError(keeper.SendCoinsAndWei(ctx, addr1, addr2, nil, sdk.DefaultBondDenom, sdk.NewInt(1), sdk.NewInt(1))) + require.Equal(sdk.NewInt(999_999_999_999), keeper.GetWeiBalance(ctx, addr1)) + require.Equal(sdk.OneInt(), keeper.GetWeiBalance(ctx, addr2)) + require.Equal(sdk.NewInt(48), keeper.GetBalance(ctx, addr1, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.OneInt(), keeper.GetBalance(ctx, addr2, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.OneInt(), keeper.GetBalance(ctx, authKeeper.GetModuleAddress(types.WeiEscrowName), sdk.DefaultBondDenom).Amount) + // sender does not get escrowed due to sufficient wei balance, recipient does not get redeemed + require.NoError(keeper.SendCoinsAndWei(ctx, addr1, addr3, nil, sdk.DefaultBondDenom, sdk.NewInt(1), sdk.NewInt(999_999_999_999))) + 
require.Equal(sdk.ZeroInt(), keeper.GetWeiBalance(ctx, addr1)) + require.Equal(sdk.NewInt(999_999_999_999), keeper.GetWeiBalance(ctx, addr3)) + require.Equal(sdk.NewInt(47), keeper.GetBalance(ctx, addr1, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.NewInt(51), keeper.GetBalance(ctx, addr3, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.OneInt(), keeper.GetBalance(ctx, authKeeper.GetModuleAddress(types.WeiEscrowName), sdk.DefaultBondDenom).Amount) + // sender gets escrowed and recipient gets redeemed + require.NoError(keeper.SendCoinsAndWei(ctx, addr1, addr3, nil, sdk.DefaultBondDenom, sdk.NewInt(1), sdk.NewInt(2))) + require.Equal(sdk.NewInt(999_999_999_998), keeper.GetWeiBalance(ctx, addr1)) + require.Equal(sdk.NewInt(1), keeper.GetWeiBalance(ctx, addr3)) + require.Equal(sdk.NewInt(45), keeper.GetBalance(ctx, addr1, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.NewInt(53), keeper.GetBalance(ctx, addr3, sdk.DefaultBondDenom).Amount) + require.Equal(sdk.OneInt(), keeper.GetBalance(ctx, authKeeper.GetModuleAddress(types.WeiEscrowName), sdk.DefaultBondDenom).Amount) +} + func (suite *IntegrationTestSuite) TestSupply() { ctx := suite.ctx diff --git a/x/bank/keeper/send.go b/x/bank/keeper/send.go index 80fdb555b..401a815ba 100644 --- a/x/bank/keeper/send.go +++ b/x/bank/keeper/send.go @@ -1,7 +1,10 @@ package keeper import ( + "errors" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/prefix" "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" @@ -16,6 +19,8 @@ type SendKeeper interface { InputOutputCoins(ctx sdk.Context, inputs []types.Input, outputs []types.Output) error SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsWithoutAccCreation(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsAndWei(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, customEscrow sdk.AccAddress, denom string, amt sdk.Int, wei sdk.Int) error GetParams(ctx sdk.Context) types.Params SetParams(ctx sdk.Context, params types.Params) @@ -27,6 +32,7 @@ type SendKeeper interface { } var _ SendKeeper = (*BaseSendKeeper)(nil) +var MaxWeiBalance sdk.Int = sdk.NewInt(1_000_000_000_000) // BaseSendKeeper only allows transfers between accounts without the possibility of // creating coins. It implements the SendKeeper interface. @@ -83,7 +89,7 @@ func (k BaseSendKeeper) InputOutputCoins(ctx sdk.Context, inputs []types.Input, return err } - err = k.subUnlockedCoins(ctx, inAddress, in.Coins) + err = k.subUnlockedCoins(ctx, inAddress, in.Coins, true) if err != nil { return err } @@ -101,7 +107,7 @@ func (k BaseSendKeeper) InputOutputCoins(ctx sdk.Context, inputs []types.Input, if err != nil { return err } - err = k.addCoins(ctx, outAddress, out.Coins) + err = k.addCoins(ctx, outAddress, out.Coins, true) if err != nil { return err } @@ -131,13 +137,7 @@ func (k BaseSendKeeper) InputOutputCoins(ctx sdk.Context, inputs []types.Input, // SendCoins transfers amt coins from a sending account to a receiving account. // An error is returned upon failure. 
func (k BaseSendKeeper) SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error { - err := k.subUnlockedCoins(ctx, fromAddr, amt) - if err != nil { - return err - } - - err = k.addCoins(ctx, toAddr, amt) - if err != nil { + if err := k.SendCoinsWithoutAccCreation(ctx, fromAddr, toAddr, amt); err != nil { return err } @@ -151,6 +151,24 @@ func (k BaseSendKeeper) SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAd k.ak.SetAccount(ctx, k.ak.NewAccountWithAddress(ctx, toAddr)) } + return nil +} + +func (k BaseSendKeeper) SendCoinsWithoutAccCreation(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error { + return k.sendCoinsWithoutAccCreation(ctx, fromAddr, toAddr, amt, true) +} + +func (k BaseSendKeeper) sendCoinsWithoutAccCreation(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins, checkNeg bool) error { + err := k.subUnlockedCoins(ctx, fromAddr, amt, checkNeg) + if err != nil { + return err + } + + err = k.addCoins(ctx, toAddr, amt, checkNeg) + if err != nil { + return err + } + ctx.EventManager().EmitEvents(sdk.Events{ sdk.NewEvent( types.EventTypeTransfer, @@ -170,7 +188,7 @@ func (k BaseSendKeeper) SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAd // subUnlockedCoins removes the unlocked amt coins of the given account. An error is // returned if the resulting balance is negative or the initial amount is invalid. // A coin_spent event is emitted after. -func (k BaseSendKeeper) subUnlockedCoins(ctx sdk.Context, addr sdk.AccAddress, amt sdk.Coins) error { +func (k BaseSendKeeper) subUnlockedCoins(ctx sdk.Context, addr sdk.AccAddress, amt sdk.Coins, checkNeg bool) error { if !amt.IsValid() { return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, amt.String()) } @@ -179,17 +197,24 @@ func (k BaseSendKeeper) subUnlockedCoins(ctx sdk.Context, addr sdk.AccAddress, a for _, coin := range amt { balance := k.GetBalance(ctx, addr, coin.Denom) - locked := sdk.NewCoin(coin.Denom, lockedCoins.AmountOf(coin.Denom)) - spendable := balance.Sub(locked) - - _, hasNeg := sdk.Coins{spendable}.SafeSub(sdk.Coins{coin}) - if hasNeg { - return sdkerrors.Wrapf(sdkerrors.ErrInsufficientFunds, "%s is smaller than %s", spendable, coin) + if checkNeg { + locked := sdk.NewCoin(coin.Denom, lockedCoins.AmountOf(coin.Denom)) + spendable := balance.Sub(locked) + + _, hasNeg := sdk.Coins{spendable}.SafeSub(sdk.Coins{coin}) + if hasNeg { + return sdkerrors.Wrapf(sdkerrors.ErrInsufficientFunds, "%s is smaller than %s", spendable, coin) + } } - newBalance := balance.Sub(coin) + var newBalance sdk.Coin + if checkNeg { + newBalance = balance.Sub(coin) + } else { + newBalance = balance.SubUnsafe(coin) + } - err := k.setBalance(ctx, addr, newBalance) + err := k.setBalance(ctx, addr, newBalance, checkNeg) if err != nil { return err } @@ -204,7 +229,7 @@ func (k BaseSendKeeper) subUnlockedCoins(ctx sdk.Context, addr sdk.AccAddress, a // addCoins increase the addr balance by the given amt. Fails if the provided amt is invalid. // It emits a coin received event. 
-func (k BaseSendKeeper) addCoins(ctx sdk.Context, addr sdk.AccAddress, amt sdk.Coins) error { +func (k BaseSendKeeper) addCoins(ctx sdk.Context, addr sdk.AccAddress, amt sdk.Coins, checkNeg bool) error { if !amt.IsValid() { return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, amt.String()) } @@ -213,7 +238,7 @@ func (k BaseSendKeeper) addCoins(ctx sdk.Context, addr sdk.AccAddress, amt sdk.C balance := k.GetBalance(ctx, addr, coin.Denom) newBalance := balance.Add(coin) - err := k.setBalance(ctx, addr, newBalance) + err := k.setBalance(ctx, addr, newBalance, checkNeg) if err != nil { return err } @@ -248,8 +273,8 @@ func (k BaseSendKeeper) initBalances(ctx sdk.Context, addr sdk.AccAddress, balan } // setBalance sets the coin balance for an account by address. -func (k BaseSendKeeper) setBalance(ctx sdk.Context, addr sdk.AccAddress, balance sdk.Coin) error { - if !balance.IsValid() { +func (k BaseSendKeeper) setBalance(ctx sdk.Context, addr sdk.AccAddress, balance sdk.Coin, checkNeg bool) error { + if checkNeg && !balance.IsValid() { return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, balance.String()) } @@ -266,6 +291,20 @@ func (k BaseSendKeeper) setBalance(ctx sdk.Context, addr sdk.AccAddress, balance return nil } +func (k BaseSendKeeper) setWeiBalance(ctx sdk.Context, addr sdk.AccAddress, amt sdk.Int) error { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.WeiBalancesPrefix) + if amt.IsZero() { + store.Delete(addr) + return nil + } + val, err := amt.Marshal() + if err != nil { + return err + } + store.Set(addr, val) + return nil +} + // IsSendEnabledCoins checks the coins provide and returns an ErrSendDisabled if // any of the coins are not configured for sending. Returns nil if sending is enabled // for all provided coin @@ -288,3 +327,55 @@ func (k BaseSendKeeper) IsSendEnabledCoin(ctx sdk.Context, coin sdk.Coin) bool { func (k BaseSendKeeper) BlockedAddr(addr sdk.AccAddress) bool { return k.blockedAddrs[addr.String()] } + +func (k BaseSendKeeper) SendCoinsAndWei(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, customEscrow sdk.AccAddress, denom string, amt sdk.Int, wei sdk.Int) error { + if wei.Equal(sdk.ZeroInt()) { + if amt.Equal(sdk.ZeroInt()) { + return nil + } + return k.SendCoinsWithoutAccCreation(ctx, from, to, sdk.NewCoins(sdk.NewCoin(denom, amt))) + } + if wei.GTE(MaxWeiBalance) { + return errors.New("wei amount must be less than 10^12") + } + escrow := customEscrow + if escrow == nil { + escrow = k.ak.GetModuleAddress(types.WeiEscrowName) + } + currentWeiBalanceFrom := k.GetWeiBalance(ctx, from) + postWeiBalanceFrom := currentWeiBalanceFrom.Sub(wei) + if postWeiBalanceFrom.GTE(sdk.ZeroInt()) { + if err := k.setWeiBalance(ctx, from, postWeiBalanceFrom); err != nil { + return err + } + } else { + // postWeiBalanceFrom is negative, so wrap it around MaxWeiBalance + if err := k.setWeiBalance(ctx, from, MaxWeiBalance.Add(postWeiBalanceFrom)); err != nil { + return err + } + // need to send one usei to escrow because wei balance is insufficient + if err := k.sendCoinsWithoutAccCreation(ctx, from, escrow, sdk.NewCoins(sdk.NewCoin(denom, sdk.OneInt())), false); err != nil { + return err + } + } + currentWeiBalanceTo := k.GetWeiBalance(ctx, to) + postWeiBalanceTo := currentWeiBalanceTo.Add(wei) + if postWeiBalanceTo.LT(MaxWeiBalance) { + if err := k.setWeiBalance(ctx, to, postWeiBalanceTo); err != nil { + return err + } + } else { + if err := k.setWeiBalance(ctx, to, postWeiBalanceTo.Sub(MaxWeiBalance)); err != nil { + return err + } + // need to redeem one usei from escrow because wei balance overflowed + one
:= sdk.NewCoins(sdk.NewCoin(denom, sdk.OneInt())) + if err := k.sendCoinsWithoutAccCreation(ctx, escrow, to, one, false); err != nil { + return err + } + } + if amt.GT(sdk.ZeroInt()) { + return k.SendCoinsWithoutAccCreation(ctx, from, to, sdk.NewCoins(sdk.NewCoin(denom, amt))) + } + return nil +} diff --git a/x/bank/keeper/view.go b/x/bank/keeper/view.go index d126ddd75..7f835ab2b 100644 --- a/x/bank/keeper/view.go +++ b/x/bank/keeper/view.go @@ -26,6 +26,7 @@ type ViewKeeper interface { GetBalance(ctx sdk.Context, addr sdk.AccAddress, denom string) sdk.Coin LockedCoins(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins SpendableCoins(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins + GetWeiBalance(ctx sdk.Context, addr sdk.AccAddress) sdk.Int IterateAccountBalances(ctx sdk.Context, addr sdk.AccAddress, cb func(coin sdk.Coin) (stop bool)) IterateAllBalances(ctx sdk.Context, cb func(address sdk.AccAddress, coin sdk.Coin) (stop bool)) @@ -232,3 +233,17 @@ func (k BaseViewKeeper) getAccountStore(ctx sdk.Context, addr sdk.AccAddress) pr return prefix.NewStore(store, types.CreateAccountBalancesPrefix(addr)) } + +func (k BaseViewKeeper) GetWeiBalance(ctx sdk.Context, addr sdk.AccAddress) sdk.Int { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.WeiBalancesPrefix) + val := store.Get(addr) + if val == nil { + return sdk.ZeroInt() + } + res := new(sdk.Int) + if err := res.Unmarshal(val); err != nil { + // should never happen + panic(err) + } + return *res +} diff --git a/x/bank/types/key.go b/x/bank/types/key.go index 8b8c25a2d..146b30de1 100644 --- a/x/bank/types/key.go +++ b/x/bank/types/key.go @@ -21,10 +21,13 @@ const ( // QuerierRoute defines the module's query routing key QuerierRoute = ModuleName + + WeiEscrowName = "weiescrow" ) // KVStore keys var ( + WeiBalancesPrefix = []byte{0x04} // BalancesPrefix is the prefix for the account balances store. We use a byte // (instead of `[]byte("balances")` to save some disk space). DeferredCachePrefix = []byte{0x03} diff --git a/x/capability/capability_test.go b/x/capability/capability_test.go index 45a5f6ea4..8f09d6d73 100644 --- a/x/capability/capability_test.go +++ b/x/capability/capability_test.go @@ -61,14 +61,10 @@ func (suite *CapabilityTestSuite) TestInitializeMemStore() { suite.Require().False(newKeeper.IsInitialized(ctx), "memstore initialized flag set before BeginBlock") // Mock app beginblock and ensure that no gas has been consumed and memstore is initialized - ctx = suite.app.BaseApp.NewContext(false, tmproto.Header{}).WithBlockGasMeter(sdk.NewGasMeter(50)) - prevGas := ctx.BlockGasMeter().GasConsumed() + ctx = suite.app.BaseApp.NewContext(false, tmproto.Header{}) restartedModule := capability.NewAppModule(suite.cdc, *newKeeper) restartedModule.BeginBlock(ctx, abci.RequestBeginBlock{}) suite.Require().True(newKeeper.IsInitialized(ctx), "memstore initialized flag not set") - gasUsed := ctx.BlockGasMeter().GasConsumed() - - suite.Require().Equal(prevGas, gasUsed, "beginblocker consumed gas during execution") // Mock the first transaction getting capability and subsequently failing // by using a cached context and discarding all cached writes. 
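
For reference on the storage layout above: setWeiBalance and GetWeiBalance persist wei balances as protobuf-marshaled sdk.Int values keyed by raw address bytes under the new WeiBalancesPrefix (0x04), and a zero balance deletes the entry so a missing key reads back as sdk.ZeroInt(). A minimal round-trip of that encoding, detached from any KVStore and purely illustrative:

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

func main() {
	amt := sdk.NewInt(999_999_999_999)

	// setWeiBalance stores this marshaled form under WeiBalancesPrefix|addr;
	// GetWeiBalance unmarshals it, treating an absent key as zero.
	bz, err := amt.Marshal()
	if err != nil {
		panic(err)
	}

	var out sdk.Int
	if err := out.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(out.Equal(amt)) // true
}
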
diff --git a/x/capability/genesis_test.go b/x/capability/genesis_test.go index 875ea9793..70aabb729 100644 --- a/x/capability/genesis_test.go +++ b/x/capability/genesis_test.go @@ -6,7 +6,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/simapp" - sdk "github.com/cosmos/cosmos-sdk/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/cosmos/cosmos-sdk/x/capability" "github.com/cosmos/cosmos-sdk/x/capability/keeper" @@ -40,7 +39,7 @@ func (suite *CapabilityTestSuite) TestGenesis() { newKeeper := keeper.NewKeeper(suite.cdc, newApp.GetKey(types.StoreKey), newApp.GetMemKey(types.MemStoreKey)) newSk1 := newKeeper.ScopeToModule(banktypes.ModuleName) newSk2 := newKeeper.ScopeToModule(stakingtypes.ModuleName) - deliverCtx, _ := newApp.BaseApp.NewUncachedContext(false, tmproto.Header{}).WithBlockGasMeter(sdk.NewInfiniteGasMeter()).CacheContext() + deliverCtx, _ := newApp.BaseApp.NewUncachedContext(false, tmproto.Header{}).CacheContext() capability.InitGenesis(deliverCtx, *newKeeper, *genState) diff --git a/x/capability/keeper/keeper.go b/x/capability/keeper/keeper.go index 35b8addf4..4ba2579cf 100644 --- a/x/capability/keeper/keeper.go +++ b/x/capability/keeper/keeper.go @@ -111,12 +111,9 @@ func (k *Keeper) InitMemStore(ctx sdk.Context) { panic(fmt.Sprintf("invalid memory store type; got %s, expected: %s", memStoreType, sdk.StoreTypeMemory)) } - // create context with no block gas meter to ensure we do not consume gas during local initialization logic. - noGasCtx := ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) - // check if memory store has not been initialized yet by checking if initialized flag is nil. - if !k.IsInitialized(noGasCtx) { - prefixStore := prefix.NewStore(noGasCtx.KVStore(k.storeKey), types.KeyPrefixIndexCapability) + if !k.IsInitialized(ctx) { + prefixStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixIndexCapability) iterator := sdk.KVStorePrefixIterator(prefixStore, nil) // initialize the in-memory store for all persisted capabilities @@ -128,11 +125,11 @@ func (k *Keeper) InitMemStore(ctx sdk.Context) { var capOwners types.CapabilityOwners k.cdc.MustUnmarshal(iterator.Value(), &capOwners) - k.InitializeCapability(noGasCtx, index, capOwners) + k.InitializeCapability(ctx, index, capOwners) } // set the initialized flag so we don't rerun initialization logic - memStore := noGasCtx.KVStore(k.memKey) + memStore := ctx.KVStore(k.memKey) memStore.Set(types.KeyMemInitialized, []byte{1}) } } diff --git a/x/genutil/gentx.go b/x/genutil/gentx.go index 766b16cbf..9ef946f90 100644 --- a/x/genutil/gentx.go +++ b/x/genutil/gentx.go @@ -1,6 +1,7 @@ package genutil import ( + "crypto/sha256" "encoding/json" "fmt" @@ -87,7 +88,7 @@ func ValidateAccountInGenesis( return nil } -type deliverTxfn func(sdk.Context, abci.RequestDeliverTx) abci.ResponseDeliverTx +type deliverTxfn func(sdk.Context, abci.RequestDeliverTx, sdk.Tx, [32]byte) abci.ResponseDeliverTx // DeliverGenTxs iterates over all genesis txs, decodes each into a Tx and // invokes the provided deliverTxfn with the decoded Tx. 
It returns the result @@ -109,7 +110,7 @@ func DeliverGenTxs( panic(err) } - res := deliverTx(ctx, abci.RequestDeliverTx{Tx: bz}) + res := deliverTx(ctx, abci.RequestDeliverTx{Tx: bz}, tx, sha256.Sum256(bz)) if !res.IsOK() { panic(res.Log) } diff --git a/x/upgrade/abci.go b/x/upgrade/abci.go index e0791bc0c..a5e5d7ea8 100644 --- a/x/upgrade/abci.go +++ b/x/upgrade/abci.go @@ -111,7 +111,6 @@ func panicUpgradeNeeded(k keeper.Keeper, ctx sdk.Context, plan types.Plan) { func applyUpgrade(k keeper.Keeper, ctx sdk.Context, plan types.Plan) { ctx.Logger().Info(fmt.Sprintf("applying upgrade \"%s\" at %s", plan.Name, plan.DueAt())) - ctx = ctx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) k.ApplyUpgrade(ctx, plan) }
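
The genutil change above mirrors the new DeliverTx signature: callers decode the tx once and pass it along with a sha256 checksum of the raw bytes, so execution no longer re-decodes and the checksum can key per-tx bookkeeping. A stubbed sketch of the call shape; the struct types here are placeholders for sdk.Context, abci.RequestDeliverTx, sdk.Tx, and abci.ResponseDeliverTx, not the real API.

package main

import (
	"crypto/sha256"
	"fmt"
)

// Placeholder types standing in for the SDK and ABCI types.
type (
	ctxT  struct{}
	reqT  struct{ Tx []byte }
	txT   struct{}
	respT struct{ Code uint32 }
)

// deliverTxfn mirrors the updated genutil callback: the decoded tx and the
// sha256 checksum of the raw bytes now travel alongside the request.
type deliverTxfn func(ctxT, reqT, txT, [32]byte) respT

func deliver(deliverTx deliverTxfn, ctx ctxT, bz []byte, tx txT) respT {
	// same call shape as DeliverGenTxs after this change
	return deliverTx(ctx, reqT{Tx: bz}, tx, sha256.Sum256(bz))
}

func main() {
	fn := func(ctxT, reqT, txT, [32]byte) respT { return respT{Code: 0} }
	res := deliver(fn, ctxT{}, []byte("raw tx bytes"), txT{})
	fmt.Println(res.Code == 0) // true, the analogue of res.IsOK()
}
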