Sync upstream 2.60.10 #231

Open

wants to merge 5 commits into base: op-erigon

2 changes: 1 addition & 1 deletion .github/workflows/qa-rpc-integration-tests.yml
@@ -28,7 +28,7 @@ jobs:
- name: Checkout RPC Tests Repository & Install Requirements
run: |
rm -rf ${{ runner.workspace }}/rpc-tests
git -c advice.detachedHead=false clone --depth 1 --branch v0.52.0 https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests
git -c advice.detachedHead=false clone --depth 1 --branch erigon_v2.6x https://github.com/erigontech/rpc-tests ${{runner.workspace}}/rpc-tests
cd ${{ runner.workspace }}/rpc-tests
pip3 install -r requirements.txt

22 changes: 14 additions & 8 deletions ChangeLog.md
@@ -1,12 +1,18 @@
ChangeLog

## v2.60.10 (in development)
## v2.60.10

### TODO

Acknowledgements:

New features:

Fixes:
**Bugfixes:**

- Trace API: commit state changes from InitializeBlockExecution @yperbasis in [#12559](https://github.com/erigontech/erigon/pull/12559).
Prior to this PR in `callManyTransactions` (invoked by `trace_block`)
changes made by `InitializeBlockExecution` were discarded. That was immaterial before since not much was
happening at the beginning of a block. But that changed in Dencun with
[EIP-4788](https://eips.ethereum.org/EIPS/eip-4788).
Fixes Issues
[#11871](https://github.com/erigontech/erigon/issues/11871),
[#12092](https://github.com/erigontech/erigon/issues/12092),
[#12242](https://github.com/erigontech/erigon/issues/12242),
[#12432](https://github.com/erigontech/erigon/issues/12432),
[#12473](https://github.com/erigontech/erigon/issues/12473),
and [#12525](https://github.com/erigontech/erigon/issues/12525).
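
The fix amounts to sharing one write-through state overlay between `InitializeBlockExecution` and the per-transaction tracing that follows, so that the EIP-4788 beacon-root write made at the start of the block is visible to `trace_block`. Below is a minimal, self-contained Go sketch of that pattern; the types are hypothetical simplifications rather than erigon's `CachedReader`/`CachedWriter`/`StateCache` API, and only the beacon-roots contract address comes from EIP-4788.

```go
package main

import "fmt"

// StateReader reads storage from the parent (pre-block) state.
type StateReader interface {
	ReadStorage(addr, slot string) string
}

// parentState models the immutable state at the parent block.
type parentState map[string]string

func (p parentState) ReadStorage(addr, slot string) string { return p[addr+":"+slot] }

// overlayState layers uncommitted writes on top of a StateReader, playing the
// role of the shared state cache / cached writer pair in the actual fix.
type overlayState struct {
	parent StateReader
	writes map[string]string
}

func newOverlay(parent StateReader) *overlayState {
	return &overlayState{parent: parent, writes: map[string]string{}}
}

func (o *overlayState) WriteStorage(addr, slot, val string) { o.writes[addr+":"+slot] = val }

func (o *overlayState) ReadStorage(addr, slot string) string {
	if v, ok := o.writes[addr+":"+slot]; ok {
		return v
	}
	return o.parent.ReadStorage(addr, slot)
}

// initializeBlock stands in for InitializeBlockExecution: since Dencun it
// stores the parent beacon block root (EIP-4788) before any transaction runs.
func initializeBlock(st *overlayState, timestamp, beaconRoot string) {
	const beaconRootsContract = "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02" // EIP-4788 contract address
	st.WriteStorage(beaconRootsContract, timestamp, beaconRoot)
}

func main() {
	parent := parentState{} // the parent state knows nothing about this block's beacon root
	st := newOverlay(parent)

	initializeBlock(st, "1710000000", "0xroot")

	// Before the fix, tracing read from the parent state directly and missed the
	// write above; reading through the shared overlay (as doCallMany now does) sees it.
	fmt.Println("via overlay:", st.ReadStorage("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02", "1710000000"))
	fmt.Println("via parent: ", parent.ReadStorage("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02", "1710000000"))
}
```

In the PR itself, the analogous wiring is visible in the diffs below: `callManyTransactions` builds `ibs` over a `CachedReader`, commits the initialization writes with `ibs.CommitBlock(rules, cachedWriter)`, and passes the same reader, cache, writer and `ibs` into `doCallMany`.
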
2 changes: 1 addition & 1 deletion params/version.go
@@ -34,7 +34,7 @@ var (
const (
VersionMajor = 2 // Major version component of the current release
VersionMinor = 60 // Minor version component of the current release
VersionMicro = 9 // Patch version component of the current release
VersionMicro = 10 // Patch version component of the current release
VersionModifier = "" // Modifier component of the current release
VersionKeyCreated = "ErigonVersionCreated"
VersionKeyFinished = "ErigonVersionFinished"
64 changes: 36 additions & 28 deletions turbo/jsonrpc/trace_adhoc.go
@@ -1125,17 +1125,33 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa
return nil, fmt.Errorf("convert callParam to msg: %w", err)
}
}
results, _, err := api.doCallMany(ctx, dbtx, msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */, traceConfig)
return results, err

chainConfig, err := api.chainConfig(ctx, dbtx)
if err != nil {
return nil, err
}
stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, *parentNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName)
if err != nil {
return nil, err
}
stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes
cachedReader := state.NewCachedReader(stateReader, stateCache)
noop := state.NewNoopWriter()
cachedWriter := state.NewCachedWriter(noop, stateCache)
ibs := state.New(cachedReader)

return api.doCallMany(ctx, dbtx, stateReader, stateCache, cachedWriter, ibs,
msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */, traceConfig)
}

func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []types.Message, callParams []TraceCallParam,
parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header, gasBailout bool, txIndexNeeded int,
traceConfig *tracers.TraceConfig,
) ([]*TraceCallResult, *state.IntraBlockState, error) {
func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, stateReader state.StateReader,
stateCache *shards.StateCache, cachedWriter state.StateWriter, ibs *state.IntraBlockState,
msgs []types.Message, callParams []TraceCallParam, parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header,
gasBailout bool, txIndexNeeded int, traceConfig *tracers.TraceConfig,
) ([]*TraceCallResult, error) {
chainConfig, err := api.chainConfig(ctx, dbtx)
if err != nil {
return nil, nil, err
return nil, err
}
engine := api.engine()

@@ -1145,29 +1161,21 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
}
blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters)
if err != nil {
return nil, nil, err
}
stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, *parentNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName)
if err != nil {
return nil, nil, err
return nil, err
}
stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes
cachedReader := state.NewCachedReader(stateReader, stateCache)
noop := state.NewNoopWriter()
cachedWriter := state.NewCachedWriter(noop, stateCache)
ibs := state.New(cachedReader)

// TODO: can read here only parent header
parentBlock, err := api.blockWithSenders(ctx, dbtx, hash, blockNumber)
if err != nil {
return nil, nil, err
return nil, err
}
if parentBlock == nil {
return nil, nil, fmt.Errorf("parent block %d(%x) not found", blockNumber, hash)
return nil, fmt.Errorf("parent block %d(%x) not found", blockNumber, hash)
}
parentHeader := parentBlock.Header()
if parentHeader == nil {
return nil, nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash)
return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash)
}

// Setup context so it may be cancelled the call has completed
@@ -1193,7 +1201,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
l1CostFunc := opstack.NewL1CostFunc(chainConfig, ibs)
for txIndex, msg := range msgs {
if err := libcommon.Stopped(ctx.Done()); err != nil {
return nil, nil, err
return nil, err
}

var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool
@@ -1207,7 +1215,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
case TraceTypeVmTrace:
traceTypeVmTrace = true
default:
return nil, nil, fmt.Errorf("unrecognized trace type: %s", traceType)
return nil, fmt.Errorf("unrecognized trace type: %s", traceType)
}
}

@@ -1217,7 +1225,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
var ot OeTracer
ot.config, err = parseOeTracerConfig(traceConfig)
if err != nil {
return nil, nil, err
return nil, err
}
ot.compat = api.compatibility
ot.r = traceResult
@@ -1291,7 +1299,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, gasBailout /* gasBailout */)
}
if err != nil {
return nil, nil, fmt.Errorf("first run for txIndex %d error: %w", txIndex, err)
return nil, fmt.Errorf("first run for txIndex %d error: %w", txIndex, err)
}

chainRules := chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Time)
@@ -1300,21 +1308,21 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
initialIbs := state.New(cloneReader)
if !txFinalized {
if err = ibs.FinalizeTx(chainRules, sd); err != nil {
return nil, nil, err
return nil, err
}
}
sd.CompareStates(initialIbs, ibs)
if err = ibs.CommitBlock(chainRules, cachedWriter); err != nil {
return nil, nil, err
return nil, err
}
} else {
if !txFinalized {
if err = ibs.FinalizeTx(chainRules, noop); err != nil {
return nil, nil, err
return nil, err
}
}
if err = ibs.CommitBlock(chainRules, cachedWriter); err != nil {
return nil, nil, err
return nil, err
}
}
if !traceTypeTrace {
@@ -1328,7 +1336,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type
}
}

return results, ibs, nil
return results, nil
}

// RawTransaction implements trace_rawTransaction.
34 changes: 20 additions & 14 deletions turbo/jsonrpc/trace_filtering.go
@@ -932,23 +932,34 @@ func (api *TraceAPIImpl) callManyTransactions(
}

callParams := make([]TraceCallParam, 0, len(txs))
reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNumber, txIndex, api.historyV3(dbtx), cfg.ChainName)
if err != nil {
return nil, nil, err

parentHash := block.ParentHash()
parentNrOrHash := rpc.BlockNumberOrHash{
BlockNumber: &parentNo,
BlockHash: &parentHash,
RequireCanonical: true,
}

initialState := state.New(reader)
stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, parentNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), cfg.ChainName)
if err != nil {
return nil, nil, err
}
stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes
cachedReader := state.NewCachedReader(stateReader, stateCache)
noop := state.NewNoopWriter()
cachedWriter := state.NewCachedWriter(noop, stateCache)
ibs := state.New(cachedReader)

engine := api.engine()
consensusHeaderReader := stagedsync.NewChainReaderImpl(cfg, dbtx, nil, nil)
logger := log.New("trace_filtering")
err = core.InitializeBlockExecution(engine.(consensus.Engine), consensusHeaderReader, block.HeaderNoCopy(), cfg, initialState, logger)
err = core.InitializeBlockExecution(engine.(consensus.Engine), consensusHeaderReader, block.HeaderNoCopy(), cfg, ibs, logger)
if err != nil {
return nil, nil, err
}
if err = ibs.CommitBlock(rules, cachedWriter); err != nil {
return nil, nil, err
}

msgs := make([]types.Message, len(txs))
for i, tx := range txs {
@@ -969,7 +980,7 @@ func (api *TraceAPIImpl) callManyTransactions(
// gnosis might have a fee free account here
if msg.FeeCap().IsZero() && engine != nil {
syscall := func(contract common.Address, data []byte) ([]byte, error) {
return core.SysCallContract(contract, data, cfg, initialState, header, engine, true /* constCall */)
return core.SysCallContract(contract, data, cfg, ibs, header, engine, true /* constCall */)
}
msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
}
@@ -984,20 +995,15 @@ func (api *TraceAPIImpl) callManyTransactions(
msgs[i] = msg
}

parentHash := block.ParentHash()

traces, lastState, cmErr := api.doCallMany(ctx, dbtx, msgs, callParams, &rpc.BlockNumberOrHash{
BlockNumber: &parentNo,
BlockHash: &parentHash,
RequireCanonical: true,
}, header, gasBailOut /* gasBailout */, txIndex, traceConfig)
traces, cmErr := api.doCallMany(ctx, dbtx, stateReader, stateCache, cachedWriter, ibs, msgs, callParams,
&parentNrOrHash, header, gasBailOut /* gasBailout */, txIndex, traceConfig)

if cmErr != nil {
return nil, nil, cmErr
}

syscall := func(contract common.Address, data []byte) ([]byte, error) {
return core.SysCallContract(contract, data, cfg, lastState, header, engine, false /* constCall */)
return core.SysCallContract(contract, data, cfg, ibs, header, engine, false /* constCall */)
}

return traces, syscall, nil
22 changes: 13 additions & 9 deletions turbo/snapshotsync/freezeblocks/block_snapshots.go
@@ -115,7 +115,9 @@ func (s Segment) FileInfo(dir string) snaptype.FileInfo {
}

func (s *Segment) reopenSeg(dir string) (err error) {
s.closeSeg()
if s.Decompressor != nil {
return nil
}
s.Decompressor, err = seg.NewDecompressor(filepath.Join(dir, s.FileName()))
if err != nil {
return fmt.Errorf("%w, fileName: %s", err, s.FileName())
@@ -160,12 +162,7 @@ func (s *Segment) openFiles() []string {
}

func (s *Segment) reopenIdxIfNeed(dir string, optimistic bool) (err error) {
if len(s.Type().IdxFileNames(s.version, s.from, s.to)) == 0 {
return nil
}

err = s.reopenIdx(dir)

if err != nil {
if !errors.Is(err, os.ErrNotExist) {
if optimistic {
@@ -180,19 +177,25 @@ func (s *Segment) reopenIdxIfNeed(dir string, optimistic bool) (err error) {
}

func (s *Segment) reopenIdx(dir string) (err error) {
s.closeIdx()
if s.Decompressor == nil {
return nil
}
for len(s.indexes) < len(s.Type().Indexes()) {
s.indexes = append(s.indexes, nil)
}

for i, fileName := range s.Type().IdxFileNames(s.version, s.from, s.to) {
if s.indexes[i] != nil {
continue
}

for _, fileName := range s.Type().IdxFileNames(s.version, s.from, s.to) {
index, err := recsplit.OpenIndex(filepath.Join(dir, fileName))

if err != nil {
return fmt.Errorf("%w, fileName: %s", err, fileName)
}

s.indexes = append(s.indexes, index)
s.indexes[i] = index
}

return nil
@@ -2152,6 +2155,7 @@ func (v *View) HeadersSegment(blockNum uint64) (*Segment, bool) {
func (v *View) BodiesSegment(blockNum uint64) (*Segment, bool) {
return v.Segment(coresnaptype.Bodies, blockNum)
}

func (v *View) TxsSegment(blockNum uint64) (*Segment, bool) {
return v.Segment(coresnaptype.Transactions, blockNum)
}