From 86d0b88cba61ed47afd82cb98932f0e275ceffa4 Mon Sep 17 00:00:00 2001 From: "alex.sharov" Date: Fri, 5 Jan 2024 14:58:45 +0700 Subject: [PATCH 1/6] save --- erigon-lib/state/domain_shared.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index d2e027d715e..2afd47a9e37 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -1183,13 +1183,13 @@ func (sdc *SharedDomainsCommitmentContext) LatestCommitmentState(tx kv.Tx, cd *D if err != nil { return 0, 0, nil, err } - v, err := cd.GetAsOf(keyCommitmentState, txn+1, tx) //WHYYY +1 ??? + state, err = cd.GetAsOf(keyCommitmentState, txn+1, tx) //WHYYY +1 ??? if err != nil { return 0, 0, nil, err } if len(state) >= 16 { - txNum, blockNum = decodeTxBlockNums(v) - return blockNum, txNum, v, err + txNum, blockNum = decodeTxBlockNums(state) + return blockNum, txNum, state, nil } } @@ -1217,7 +1217,7 @@ func (sdc *SharedDomainsCommitmentContext) LatestCommitmentState(tx kv.Tx, cd *D } txNum, blockNum = decodeTxBlockNums(state) - return blockNum, txNum, state, err + return blockNum, txNum, state, nil } // SeekCommitment [sinceTx, untilTx] searches for last encoded state from DomainCommitted From 8c036fc8da11c20b388acc184670cc824df264f5 Mon Sep 17 00:00:00 2001 From: Michele Modolo <70838029+michelemodolo@users.noreply.github.com> Date: Fri, 5 Jan 2024 11:52:48 +0100 Subject: [PATCH 2/6] integration stage_headers (#9142) Added integration stage_headers command Co-authored-by: Michele Modolo --- cmd/integration/Readme.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/integration/Readme.md b/cmd/integration/Readme.md index 389248b1e5c..29387ed2850 100644 --- a/cmd/integration/Readme.md +++ b/cmd/integration/Readme.md @@ -33,6 +33,9 @@ integration stage_history --unwind=N integration stage_exec --prune.to=N integration stage_history --prune.to=N +# Reset stage_headers +integration stage_headers --reset --datadir= --chain= + # Exec blocks, but don't commit changes (lose them) integration stage_exec --no-commit ... From 3eeb57218f13f9e755c132b2003c826551b8e560 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 5 Jan 2024 13:03:36 +0100 Subject: [PATCH 3/6] params: begin 2.57 release cycle (#9144) --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index cb2d12d5bb1..a1b0c0ae15b 100644 --- a/params/version.go +++ b/params/version.go @@ -32,7 +32,7 @@ var ( // see https://calver.org const ( VersionMajor = 2 // Major version component of the current release - VersionMinor = 56 // Minor version component of the current release + VersionMinor = 57 // Minor version component of the current release VersionMicro = 0 // Patch version component of the current release VersionModifier = "dev" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" From ebe16d8360cb42067be9e558f816cf107bfad208 Mon Sep 17 00:00:00 2001 From: battlmonstr Date: Fri, 5 Jan 2024 14:03:19 +0100 Subject: [PATCH 4/6] bor: BorConfig setup fix (#9145) A crash on startup happens on --chain=mumbai, because I confused chainConfig.Bor (from type chain.Config) and config.Bor (from type ethconfig.Config) in the setup code. The ethconfig.Config.Bor property contained bogus values and was used only to check its type in CreateConsensusEngine(). Its value was never read (before PR #9117).
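To make the mix-up concrete, here is a minimal sketch of the bug, using invented, simplified struct shapes rather than the real Erigon definitions: both expressions have a Bor-shaped type, but only chainConfig.Bor carries parsed consensus parameters.

```go
package main

import "fmt"

// Hypothetical, simplified stand-ins for chain.Config and ethconfig.Config.
type BorConfig struct{ SprintSize uint64 }

type ChainConfig struct {
	Bor *BorConfig // parsed from the chain spec; nil on non-Bor chains
}

type EthConfig struct {
	Bor BorConfig // never written to, so always zero-valued
}

// consensusConfigFor mirrors the selection logic in CreateConsensusEngine.
func consensusConfigFor(chainConfig *ChainConfig, config *EthConfig) interface{} {
	if chainConfig.Bor != nil {
		// Pre-patch code returned &config.Bor: the right type, but bogus zero values.
		return chainConfig.Bor // post-patch: the real, populated config
	}
	return nil
}

func main() {
	cc := &ChainConfig{Bor: &BorConfig{SprintSize: 64}}
	fmt.Printf("%+v\n", consensusConfigFor(cc, &EthConfig{}))
}
```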
This change removes the property to avoid confusion and fix the crash. Devnet network.BorStateSyncDelay was implemented using ethconfig.Config.Bor, but it wasn't taking any effect. It should be fixed separately in a different way. --- cmd/devnet/devnet/node.go | 8 +++++--- cmd/integration/commands/stages.go | 11 ++++++----- cmd/state/commands/check_change_sets.go | 2 +- eth/backend.go | 3 ++- eth/ethconfig/config.go | 3 --- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/cmd/devnet/devnet/node.go b/cmd/devnet/devnet/node.go index 4c372721a03..30f46633615 100644 --- a/cmd/devnet/devnet/node.go +++ b/cmd/devnet/devnet/node.go @@ -8,6 +8,9 @@ import ( "sync" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" + "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/args" "github.com/ledgerwatch/erigon/cmd/devnet/requests" @@ -17,8 +20,6 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/debug" enode "github.com/ledgerwatch/erigon/turbo/node" - "github.com/ledgerwatch/log/v3" - "github.com/urfave/cli/v2" ) type Node interface { @@ -171,7 +172,8 @@ func (n *devnetNode) run(ctx *cli.Context) error { } if n.network.BorStateSyncDelay > 0 { - n.ethCfg.Bor.StateSyncConfirmationDelay = map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())} + stateSyncConfirmationDelay := map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())} + logger.Warn("TODO: custom BorStateSyncDelay is not applied to BorConfig.StateSyncConfirmationDelay", "delay", stateSyncConfirmationDelay) } n.ethNode, err = enode.New(ctx.Context, n.nodeCfg, n.ethCfg, logger) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index fdfa42c44a2..b084cc3a7f5 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -13,6 +13,11 @@ import ( "github.com/c2h5oh/datasize" "github.com/erigontech/mdbx-go/mdbx" lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" + "github.com/spf13/cobra" + "golang.org/x/exp/slices" + "github.com/ledgerwatch/erigon/consensus/bor" "github.com/ledgerwatch/erigon/consensus/bor/heimdall" "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc" @@ -21,10 +26,6 @@ import ( "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/secp256k1" - "github.com/spf13/cobra" - "golang.org/x/exp/slices" chain2 "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/commitment" @@ -1674,7 +1675,7 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db } else if cc.Aura != nil { consensusConfig = &config.Aura } else if cc.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = cc.Bor config.HeimdallURL = HeimdallURL if !config.WithoutHeimdall { if config.HeimdallgRPCAddress != "" { diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index 19eceebb5ca..3b844f1e5d7 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -288,7 +288,7 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, snapshots *free } else if cc.Aura != nil { consensusConfig = &config.Aura } else if cc.Bor != nil { - consensusConfig = &config.Bor + 
consensusConfig = cc.Bor } else { consensusConfig = &config.Ethash } diff --git a/eth/backend.go b/eth/backend.go index f389a9463c4..04436895754 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -32,6 +32,7 @@ import ( "time" lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/diagnostics" @@ -509,7 +510,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } else if chainConfig.Aura != nil { consensusConfig = &config.Aura } else if chainConfig.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = chainConfig.Bor } else { consensusConfig = &config.Ethash } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 47ee3501560..a75b5783ac6 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -34,8 +34,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" - "github.com/ledgerwatch/erigon/consensus/bor/borcfg" - "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg" @@ -211,7 +209,6 @@ type Config struct { Clique params.ConsensusSnapshotConfig Aura chain.AuRaConfig - Bor borcfg.BorConfig // Transaction pool options DeprecatedTxPool DeprecatedTxPoolConfig From 98cc1ee808fc71eaf97eb0b9d18906aa05021958 Mon Sep 17 00:00:00 2001 From: milen <94537774+taratorio@users.noreply.github.com> Date: Fri, 5 Jan 2024 14:20:21 +0000 Subject: [PATCH 5/6] stagedsync: implement bor span for chain reader and fix loggers (#9146) While working on fixing the bor mining loop I stumbled across an error in `ChainReader.BorSpan`: a "not implemented" panic. I also hit a few other panics due to a missing logger in `ChainReaderImpl` struct initialisations. This PR fixes both.
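For reviewers, a short usage sketch of the new contract. The field names and the `BorSpan` signature match this patch; `cfg`, `tx`, `logger`, and `spanId` are placeholders for whatever the call site has in scope.

```go
// Every ChainReader construction site must now set Logger; forgetting it was
// the source of the nil-logger panics this patch fixes.
chainReader := stagedsync.ChainReader{
	Cfg:         cfg.chainConfig,
	Db:          tx,
	BlockReader: cfg.blockReader,
	Logger:      logger,
}

// BorSpan is implemented instead of panicking: it fetches the span via the
// block reader and, on error, logs and returns nil.
span := chainReader.BorSpan(spanId)
_ = span
```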
--- consensus/clique/snapshot_test.go | 13 ++++++-- core/block_validator_test.go | 34 ++++++++++++++++----- eth/stagedsync/chain_reader.go | 23 +++++++++----- eth/stagedsync/stage_bodies.go | 6 ++-- eth/stagedsync/stage_headers.go | 17 +++++++---- eth/stagedsync/stage_mining_create_block.go | 8 ++--- eth/stagedsync/stage_mining_exec.go | 7 ++--- eth/stagedsync/stage_mining_finish.go | 10 +++--- 8 files changed, 78 insertions(+), 40 deletions(-) diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index 0f487177bfc..de00f536d79 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -23,6 +23,8 @@ import ( "sort" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" @@ -35,7 +37,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages/mock" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/turbo/testlog" ) // testerAccountPool is a pool to maintain currently active tester accounts, @@ -392,6 +394,7 @@ func TestClique(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) // Create the account pool and generate the initial set of signers accounts := newTesterAccountPool() @@ -509,7 +512,13 @@ func TestClique(t *testing.T) { var snap *clique.Snapshot if err := m.DB.View(context.Background(), func(tx kv.Tx) error { - snap, err = engine.Snapshot(stagedsync.ChainReader{Cfg: config, Db: tx, BlockReader: m.BlockReader}, head.NumberU64(), head.Hash(), nil) + chainReader := stagedsync.ChainReader{ + Cfg: config, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + snap, err = engine.Snapshot(chainReader, head.NumberU64(), head.Hash(), nil) if err != nil { return err } diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 34ab7a2c21d..e57d40d3477 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -20,13 +20,17 @@ import ( "context" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages/mock" + "github.com/ledgerwatch/erigon/turbo/testlog" ) // Tests that simple header verification works, for both good and bad blocks. 
@@ -37,6 +41,7 @@ func TestHeaderVerification(t *testing.T) { gspec = &types.Genesis{Config: params.TestChainConfig} engine = ethash.NewFaker() ) + logger := testlog.Logger(t, log.LvlInfo) checkStateRoot := true m := mock.MockWithGenesisEngine(t, gspec, engine, false, checkStateRoot) @@ -48,13 +53,19 @@ func TestHeaderVerification(t *testing.T) { for i := 0; i < chain.Length(); i++ { if err := m.DB.View(context.Background(), func(tx kv.Tx) error { for j, valid := range []bool{true, false} { + chainReader := stagedsync.ChainReader{ + Cfg: *params.TestChainConfig, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + var engine consensus.Engine if valid { - engine := ethash.NewFaker() - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFaker() } else { - engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) } + err = engine.VerifyHeader(chainReader, chain.Headers[i], true) if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) } @@ -79,6 +90,7 @@ func TestHeaderWithSealVerification(t *testing.T) { gspec = &types.Genesis{Config: params.TestChainAuraConfig} engine = ethash.NewFaker() ) + logger := testlog.Logger(t, log.LvlInfo) checkStateRoot := true m := mock.MockWithGenesisEngine(t, gspec, engine, false, checkStateRoot) @@ -91,13 +103,19 @@ func TestHeaderWithSealVerification(t *testing.T) { for i := 0; i < chain.Length(); i++ { if err := m.DB.View(context.Background(), func(tx kv.Tx) error { for j, valid := range []bool{true, false} { + chainReader := stagedsync.ChainReader{ + Cfg: *params.TestChainAuraConfig, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + var engine consensus.Engine if valid { - engine := ethash.NewFaker() - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainAuraConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFaker() } else { - engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainAuraConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) } + err = engine.VerifyHeader(chainReader, chain.Headers[i], true) if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) } diff --git a/eth/stagedsync/chain_reader.go b/eth/stagedsync/chain_reader.go index 862cae5710a..f1d0e52057e 100644 --- a/eth/stagedsync/chain_reader.go +++ b/eth/stagedsync/chain_reader.go @@ -4,23 +4,24 @@ import ( "context" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/services" ) -// Implements consensus.ChainReader +// ChainReader implements consensus.ChainReader type ChainReader struct { Cfg chain.Config Db kv.Getter BlockReader 
services.FullBlockReader + Logger log.Logger } // Config retrieves the blockchain's chain configuration. @@ -81,10 +82,16 @@ func (cr ChainReader) FrozenBlocks() uint64 { return cr.BlockReader.FrozenBlocks() } -func (cr ChainReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue { - panic("") +func (cr ChainReader) BorEventsByBlock(_ libcommon.Hash, _ uint64) []rlp.RawValue { + panic("bor events by block not implemented") } func (cr ChainReader) BorSpan(spanId uint64) []byte { - panic("") + span, err := cr.BlockReader.Span(context.Background(), cr.Db, spanId) + if err != nil { + cr.Logger.Error("BorSpan failed", "err", err) + return nil + } + + return span } diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 62811264323..c10aaae4438 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -6,14 +6,14 @@ import ( "runtime" "time" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/dataflow" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/turbo/adapter" @@ -134,7 +134,7 @@ func BodiesForward( prevProgress := bodyProgress var noProgressCount uint = 0 // How many time the progress was printed without actual progress var totalDelivered uint64 = 0 - cr := ChainReader{Cfg: cfg.chanConfig, Db: tx, BlockReader: cfg.blockReader} + cr := ChainReader{Cfg: cfg.chanConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} loopBody := func() (bool, error) { // loopCount is used here to ensure we don't get caught in a constant loop of making requests diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index aa60d348ae7..839d7842468 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -9,19 +9,19 @@ import ( "time" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/erigon/eth/ethconfig" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" "github.com/ledgerwatch/erigon/turbo/services" @@ -183,7 +183,12 @@ func HeadersPOW( } TEMP TESTING */ headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, startProgress, cfg.blockReader) - cfg.hd.SetHeaderReader(&ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}) + cfg.hd.SetHeaderReader(&ChainReaderImpl{ + config: &cfg.chainConfig, + tx: tx, + blockReader: cfg.blockReader, + logger: logger, + }) stopped := false var noProgressCounter uint = 0 diff --git 
a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index 5016d4afa02..40b63680e16 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -8,11 +8,10 @@ import ( "time" mapset "github.com/deckarep/golang-set/v2" - "github.com/ledgerwatch/erigon-lib/chain" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/consensus" @@ -22,6 +21,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethutils" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" ) type MiningBlock struct { @@ -127,7 +127,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if err != nil { return err } - chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader} + chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} var GetBlocksFromHash = func(hash libcommon.Hash, n int) (blocks []*types.Block) { number := rawdb.ReadHeaderNumber(tx, hash) if number == nil { diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index d2ad47f6fb0..abc94326dc0 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -10,15 +10,14 @@ import ( mapset "github.com/deckarep/golang-set/v2" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/kv/membatch" "github.com/ledgerwatch/log/v3" "golang.org/x/net/context" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/membatch" types2 "github.com/ledgerwatch/erigon-lib/types" - "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" @@ -89,7 +88,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c ibs := state.New(stateReader) stateWriter := state.NewPlainStateWriter(tx, tx, current.Header.Number.Uint64()) - chainReader := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader} + chainReader := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} core.InitializeBlockExecution(cfg.engine, chainReader, current.Header, &cfg.chainConfig, ibs, logger) // Create an empty block based on temporary copied state for @@ -163,7 +162,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c } var err error - _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, true, logger) + _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader, logger: logger}, true, logger) if err != nil { return err } diff --git 
a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go index 16d90e00667..81cc486e57c 100644 --- a/eth/stagedsync/stage_mining_finish.go +++ b/eth/stagedsync/stage_mining_finish.go @@ -3,14 +3,14 @@ package stagedsync import ( "fmt" - "github.com/ledgerwatch/erigon-lib/chain" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/turbo/builder" - "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/builder" + "github.com/ledgerwatch/erigon/turbo/services" ) type MiningFinishCfg struct { @@ -95,7 +95,7 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit default: logger.Trace("No in-flight sealing task.") } - chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader} + chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} if err := cfg.engine.Seal(chain, block, cfg.miningState.MiningResultCh, cfg.sealCancel); err != nil { logger.Warn("Block sealing failed", "err", err) } From e958d3584d0812c985a4158d1f51c3863c4b2695 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sat, 6 Jan 2024 21:49:23 +0100 Subject: [PATCH 6/6] Added Heads method to Beacon API (#9135) --- cl/beacon/handler/forkchoice.go | 39 +++++++++ cl/beacon/handler/forkchoice_test.go | 80 +++++++++++++++++ cl/beacon/handler/handler.go | 6 +- cl/beacon/handler/node.go | 20 +++++ cl/beacon/handler/node_test.go | 49 +++++++++++ cl/beacon/synced_data/synced_data.go | 5 +- .../historical_states_reader_test.go | 6 +- cl/phase1/forkchoice/forkchoice.go | 86 +++++++++++++++++-- cl/phase1/forkchoice/forkchoice_mock.go | 5 ++ cl/phase1/forkchoice/get_head.go | 54 ++++++++++-- cl/phase1/forkchoice/interface.go | 1 + cl/phase1/forkchoice/on_attestation.go | 51 ++++++++++- cl/phase1/forkchoice/on_attester_slashing.go | 2 +- cl/phase1/forkchoice/on_block.go | 3 + cl/phase1/forkchoice/utils.go | 1 + cl/phase1/stages/clstages.go | 19 ++-- 16 files changed, 392 insertions(+), 35 deletions(-) create mode 100644 cl/beacon/handler/forkchoice.go create mode 100644 cl/beacon/handler/forkchoice_test.go create mode 100644 cl/beacon/handler/node.go create mode 100644 cl/beacon/handler/node_test.go diff --git a/cl/beacon/handler/forkchoice.go b/cl/beacon/handler/forkchoice.go new file mode 100644 index 00000000000..4f7f845b707 --- /dev/null +++ b/cl/beacon/handler/forkchoice.go @@ -0,0 +1,39 @@ +package handler + +import ( + "encoding/json" + "net/http" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" +) + +func (a *ApiHandler) GetEthV2DebugBeaconHeads(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + if a.syncedData.Syncing() { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "beacon node is syncing") + } + hash, slotNumber, err := a.forkchoiceStore.GetHead() + if err != nil { + return nil, err + } + return newBeaconResponse( + []interface{}{ + map[string]interface{}{ + "slot": slotNumber, + "root": hash, + "execution_optimistic": false, + }, + }), nil +} + +func (a *ApiHandler) GetEthV1DebugBeaconForkChoice(w http.ResponseWriter, r *http.Request) { + justifiedCheckpoint := a.forkchoiceStore.JustifiedCheckpoint() + finalizedCheckpoint := a.forkchoiceStore.FinalizedCheckpoint() + forkNodes := a.forkchoiceStore.ForkNodes() + if err := 
json.NewEncoder(w).Encode(map[string]interface{}{ + "justified_checkpoint": justifiedCheckpoint, + "finalized_checkpoint": finalizedCheckpoint, + "fork_choice_nodes": forkNodes, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git a/cl/beacon/handler/forkchoice_test.go b/cl/beacon/handler/forkchoice_test.go new file mode 100644 index 00000000000..8b98997f815 --- /dev/null +++ b/cl/beacon/handler/forkchoice_test.go @@ -0,0 +1,80 @@ +package handler + +import ( + "io" + "net/http/httptest" + "testing" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" + "github.com/stretchr/testify/require" +) + +func TestGetHeads(t *testing.T) { + // find server + _, _, _, _, p, handler, _, sm, fcu := setupTestingHandler(t, clparams.Phase0Version) + sm.OnHeadState(p) + s, cancel := sm.HeadState() + s.SetSlot(789274827847783) + cancel() + + fcu.HeadSlotVal = 128 + fcu.HeadVal = libcommon.Hash{1, 2, 3} + server := httptest.NewServer(handler.mux) + defer server.Close() + + // get heads + resp, err := server.Client().Get(server.URL + "/eth/v2/debug/beacon/heads") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, 200, resp.StatusCode) + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, `{"data":[{"execution_optimistic":false,"root":"0x0102030000000000000000000000000000000000000000000000000000000000","slot":128}]}`+"\n", string(out)) +} + +func TestGetForkchoice(t *testing.T) { + // find server + _, _, _, _, p, handler, _, sm, fcu := setupTestingHandler(t, clparams.Phase0Version) + sm.OnHeadState(p) + s, cancel := sm.HeadState() + s.SetSlot(789274827847783) + cancel() + + fcu.HeadSlotVal = 128 + fcu.HeadVal = libcommon.Hash{1, 2, 3} + server := httptest.NewServer(handler.mux) + defer server.Close() + + fcu.WeightsMock = []forkchoice.ForkNode{ + { + BlockRoot: libcommon.Hash{1, 2, 3}, + ParentRoot: libcommon.Hash{1, 2, 3}, + Slot: 128, + Weight: 1, + }, + { + BlockRoot: libcommon.Hash{1, 2, 2, 4, 5, 3}, + ParentRoot: libcommon.Hash{1, 2, 5}, + Slot: 128, + Weight: 2, + }, + } + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(libcommon.Hash{1, 2, 3}, 1) + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(libcommon.Hash{1, 2, 3}, 2) + + // get heads + resp, err := server.Client().Get(server.URL + "/eth/v1/debug/fork_choice") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, 200, resp.StatusCode) + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, 
`{"finalized_checkpoint":{"epoch":"1","root":"0x0102030000000000000000000000000000000000000000000000000000000000"},"fork_choice_nodes":[{"slot":"128","block_root":"0x0102030000000000000000000000000000000000000000000000000000000000","parent_root":"0x0102030000000000000000000000000000000000000000000000000000000000","justified_epoch":"0","finalized_epoch":"0","weight":"1","validity":"","execution_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"},{"slot":"128","block_root":"0x0102020405030000000000000000000000000000000000000000000000000000","parent_root":"0x0102050000000000000000000000000000000000000000000000000000000000","justified_epoch":"0","finalized_epoch":"0","weight":"2","validity":"","execution_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"}],"justified_checkpoint":{"epoch":"2","root":"0x0102030000000000000000000000000000000000000000000000000000000000"}}`+"\n", string(out)) +} diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index fb4e5aa63f6..eb2fc2e90f6 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -49,8 +49,11 @@ func (a *ApiHandler) init() { // otterscn specific ones are commented as such r.Route("/eth", func(r chi.Router) { r.Route("/v1", func(r chi.Router) { - r.Get("/events", http.NotFound) + r.Route("/node", func(r chi.Router) { + r.Get("/health", a.GetEthV1NodeHealth) + }) + r.Get("/debug/fork_choice", a.GetEthV1DebugBeaconForkChoice) r.Route("/config", func(r chi.Router) { r.Get("/spec", beaconhttp.HandleEndpointFunc(a.getSpec)) r.Get("/deposit_contract", beaconhttp.HandleEndpointFunc(a.getDepositContract)) @@ -125,6 +128,7 @@ func (a *ApiHandler) init() { r.Route("/debug", func(r chi.Router) { r.Route("/beacon", func(r chi.Router) { r.Get("/states/{state_id}", beaconhttp.HandleEndpointFunc(a.getFullState)) + r.Get("/heads", beaconhttp.HandleEndpointFunc(a.GetEthV2DebugBeaconHeads)) }) }) r.Route("/beacon", func(r chi.Router) { diff --git a/cl/beacon/handler/node.go b/cl/beacon/handler/node.go new file mode 100644 index 00000000000..26f4fc46f0d --- /dev/null +++ b/cl/beacon/handler/node.go @@ -0,0 +1,20 @@ +package handler + +import "net/http" + +func (a *ApiHandler) GetEthV1NodeHealth(w http.ResponseWriter, r *http.Request) { + syncingStatus, err := uint64FromQueryParams(r, "syncing_status") + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + syncingCode := http.StatusOK + if syncingStatus != nil { + syncingCode = int(*syncingStatus) + } + if a.syncedData.Syncing() { + w.WriteHeader(syncingCode) + return + } + w.WriteHeader(http.StatusOK) +} diff --git a/cl/beacon/handler/node_test.go b/cl/beacon/handler/node_test.go new file mode 100644 index 00000000000..094412ddd7e --- /dev/null +++ b/cl/beacon/handler/node_test.go @@ -0,0 +1,49 @@ +package handler + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/stretchr/testify/require" +) + +func TestNodeSyncing(t *testing.T) { + // i just want the correct schema to be generated + _, _, _, _, _, handler, _, _, _ := setupTestingHandler(t, clparams.Phase0Version) + + // Call GET /eth/v1/node/health + server := httptest.NewServer(handler.mux) + defer server.Close() + + req, err := http.NewRequest("GET", server.URL+"/eth/v1/node/health?syncing_status=666", nil) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, 666, 
resp.StatusCode) +} + +func TestNodeSyncingTip(t *testing.T) { + // i just want the correct schema to be generated + _, _, _, _, post, handler, _, sm, _ := setupTestingHandler(t, clparams.Phase0Version) + + // Call GET /eth/v1/node/health + server := httptest.NewServer(handler.mux) + defer server.Close() + + req, err := http.NewRequest("GET", server.URL+"/eth/v1/node/health?syncing_status=666", nil) + require.NoError(t, err) + + require.NoError(t, sm.OnHeadState(post)) + s, cancel := sm.HeadState() + s.SetSlot(999999999999999) + cancel() + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, 200, resp.StatusCode) +} diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go index c8de023f888..0d6f7e0789f 100644 --- a/cl/beacon/synced_data/synced_data.go +++ b/cl/beacon/synced_data/synced_data.go @@ -32,6 +32,9 @@ func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err defer s.mu.Unlock() if s.headState == nil { s.headState, err = newState.Copy() + if err != nil { + return err + } } err = newState.CopyInto(s.headState) if err != nil { @@ -56,7 +59,7 @@ func (s *SyncedDataManager) Syncing() bool { s.mu.RLock() defer s.mu.RUnlock() if s.headState == nil { - return false + return true } headEpoch := utils.GetCurrentEpoch(s.headState.GenesisTime(), s.cfg.SecondsPerSlot, s.cfg.SlotsPerEpoch) diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go index cc98f2ac304..cec53451589 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go @@ -47,19 +47,19 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt } func TestStateAntiquaryCapella(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetCapellaRandom() runTest(t, blocks, preState, postState) } func TestStateAntiquaryPhase0(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetPhase0Random() runTest(t, blocks, preState, postState) } func TestStateAntiquaryBellatrix(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetBellatrixRandom() runTest(t, blocks, preState, postState) } diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go index f6533b96ebb..bde4d322f37 100644 --- a/cl/phase1/forkchoice/forkchoice.go +++ b/cl/phase1/forkchoice/forkchoice.go @@ -2,6 +2,7 @@ package forkchoice import ( "context" + "sort" "sync" "github.com/ledgerwatch/erigon/cl/clparams" @@ -20,6 +21,31 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" ) +// Schema +/* +{ + "slot": "1", + "block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "justified_epoch": "1", + "finalized_epoch": "1", + "weight": "1", + "validity": "valid", + "execution_block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "extra_data": {} + } +*/ +type ForkNode struct { + Slot uint64 `json:"slot,string"` + BlockRoot libcommon.Hash `json:"block_root"` + ParentRoot libcommon.Hash `json:"parent_root"` + JustifiedEpoch uint64 `json:"justified_epoch,string"` + FinalizedEpoch uint64 `json:"finalized_epoch,string"` + Weight uint64 `json:"weight,string"` + Validity 
string `json:"validity"` + ExecutionBlock libcommon.Hash `json:"execution_block_hash"` +} + type checkpointComparable string const ( @@ -53,17 +79,21 @@ type ForkChoiceStore struct { unrealizedJustifiedCheckpoint solid.Checkpoint unrealizedFinalizedCheckpoint solid.Checkpoint proposerBoostRoot libcommon.Hash - headHash libcommon.Hash - headSlot uint64 - genesisTime uint64 - childrens map[libcommon.Hash]childrens + // head data + headHash libcommon.Hash + headSlot uint64 + genesisTime uint64 + weights map[libcommon.Hash]uint64 + headSet map[libcommon.Hash]struct{} + // childrens + childrens map[libcommon.Hash]childrens // Use go map because this is actually an unordered set - equivocatingIndicies map[uint64]struct{} + equivocatingIndicies []byte forkGraph fork_graph.ForkGraph // I use the cache due to the convenient auto-cleanup feauture. checkpointStates map[checkpointComparable]*checkpointState // We keep ssz snappy of it as the full beacon state is full of rendundant data. - latestMessages map[uint64]*LatestMessage + latestMessages []LatestMessage anchorPublicKeys []byte // We keep track of them so that we can forkchoice with EL. eth2Roots *lru.Cache[libcommon.Hash, libcommon.Hash] // ETH2 root -> ETH1 hash @@ -163,6 +193,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt r := solid.NewHashVector(int(anchorState.BeaconConfig().EpochsPerHistoricalVector)) anchorState.RandaoMixes().CopyTo(r) randaoMixesLists.Add(anchorRoot, r) + headSet := make(map[libcommon.Hash]struct{}) + headSet[anchorRoot] = struct{}{} return &ForkChoiceStore{ ctx: ctx, highestSeen: anchorState.Slot(), @@ -172,8 +204,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt unrealizedJustifiedCheckpoint: anchorCheckpoint.Copy(), unrealizedFinalizedCheckpoint: anchorCheckpoint.Copy(), forkGraph: forkGraph, - equivocatingIndicies: map[uint64]struct{}{}, - latestMessages: map[uint64]*LatestMessage{}, + equivocatingIndicies: make([]byte, anchorState.ValidatorLength(), anchorState.ValidatorLength()*2), + latestMessages: make([]LatestMessage, anchorState.ValidatorLength(), anchorState.ValidatorLength()*2), checkpointStates: make(map[checkpointComparable]*checkpointState), eth2Roots: eth2Roots, engine: engine, @@ -188,6 +220,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt totalActiveBalances: totalActiveBalances, randaoMixesLists: randaoMixesLists, randaoDeltas: randaoDeltas, + headSet: headSet, + weights: make(map[libcommon.Hash]uint64), participation: participation, }, nil } @@ -399,3 +433,39 @@ func (f *ForkChoiceStore) RandaoMixes(blockRoot libcommon.Hash, out solid.HashLi func (f *ForkChoiceStore) Partecipation(epoch uint64) (*solid.BitList, bool) { return f.participation.Get(epoch) } + +func (f *ForkChoiceStore) ForkNodes() []ForkNode { + f.mu.Lock() + defer f.mu.Unlock() + forkNodes := make([]ForkNode, 0, len(f.weights)) + for blockRoot, weight := range f.weights { + header, has := f.forkGraph.GetHeader(blockRoot) + if !has { + continue + } + justifiedCheckpoint, has := f.forkGraph.GetCurrentJustifiedCheckpoint(blockRoot) + if !has { + continue + } + finalizedCheckpoint, has := f.forkGraph.GetFinalizedCheckpoint(blockRoot) + if !has { + continue + } + blockHash, _ := f.eth2Roots.Get(blockRoot) + + forkNodes = append(forkNodes, ForkNode{ + Weight: weight, + BlockRoot: blockRoot, + ParentRoot: header.ParentRoot, + JustifiedEpoch: justifiedCheckpoint.Epoch(), + FinalizedEpoch: finalizedCheckpoint.Epoch(), + Slot: 
header.Slot, + Validity: "valid", + ExecutionBlock: blockHash, + }) + } + sort.Slice(forkNodes, func(i, j int) bool { + return forkNodes[i].Slot < forkNodes[j].Slot + }) + return forkNodes +} diff --git a/cl/phase1/forkchoice/forkchoice_mock.go b/cl/phase1/forkchoice/forkchoice_mock.go index 16f8ee6b0af..6ae413d4f96 100644 --- a/cl/phase1/forkchoice/forkchoice_mock.go +++ b/cl/phase1/forkchoice/forkchoice_mock.go @@ -66,6 +66,7 @@ type ForkChoiceStorageMock struct { StateAtSlotVal map[uint64]*state.CachingBeaconState GetSyncCommitteesVal map[common.Hash][2]*solid.SyncCommittee GetFinalityCheckpointsVal map[common.Hash][3]solid.Checkpoint + WeightsMock []ForkNode Pool pool.OperationsPool } @@ -215,3 +216,7 @@ func (f *ForkChoiceStorageMock) OnBlsToExecutionChange(signedChange *cltypes.Sig f.Pool.BLSToExecutionChangesPool.Insert(signedChange.Signature, signedChange) return nil } + +func (f *ForkChoiceStorageMock) ForkNodes() []ForkNode { + return f.WeightsMock +} diff --git a/cl/phase1/forkchoice/get_head.go b/cl/phase1/forkchoice/get_head.go index e1300c2c022..56165f4a2bc 100644 --- a/cl/phase1/forkchoice/get_head.go +++ b/cl/phase1/forkchoice/get_head.go @@ -16,6 +16,23 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) { return f.getHead() } +// accountWeights updates the weights of the validators, given the vote and given an head leaf. +func (f *ForkChoiceStore) accountWeights(votes, weights map[libcommon.Hash]uint64, justifedRoot, leaf libcommon.Hash) { + curr := leaf + accumulated := uint64(0) + for curr != justifedRoot { + accumulated += votes[curr] + votes[curr] = 0 // make sure we don't double count + weights[curr] += accumulated + header, has := f.forkGraph.GetHeader(curr) + if !has { + return + } + curr = header.ParentRoot + } + return +} + func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) { if f.headHash != (libcommon.Hash{}) { return f.headHash, f.headSlot, nil @@ -28,8 +45,33 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) { if err != nil { return libcommon.Hash{}, 0, err } - // Filter all validators deemed as bad - filteredIndicies := f.filterValidatorSetForAttestationScores(justificationState, justificationState.epoch) + // Do a simple scan to determine the fork votes. + votes := make(map[libcommon.Hash]uint64) + for validatorIndex, message := range f.latestMessages { + if message == (LatestMessage{}) { + continue + } + if !readFromBitset(justificationState.actives, validatorIndex) || readFromBitset(justificationState.slasheds, validatorIndex) { + continue + } + if _, hasLatestMessage := f.getLatestMessage(uint64(validatorIndex)); !hasLatestMessage { + continue + } + if f.isUnequivocating(uint64(validatorIndex)) { + continue + } + votes[message.Root] += justificationState.balances[validatorIndex] + } + if f.proposerBoostRoot != (libcommon.Hash{}) { + boost := justificationState.activeBalance / justificationState.beaconConfig.SlotsPerEpoch + votes[f.proposerBoostRoot] += (boost * justificationState.beaconConfig.ProposerScoreBoost) / 100 + } + // Account for weights on each head fork + f.weights = make(map[libcommon.Hash]uint64) + for head := range f.headSet { + f.accountWeights(votes, f.weights, f.justifiedCheckpoint.BlockRoot(), head) + } + for { // Filter out current head children. unfilteredChildren := f.children(f.headHash) @@ -62,9 +104,9 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) { // After sorting is done determine best fit. 
f.headHash = children[0] - maxWeight := f.getWeight(children[0], filteredIndicies, justificationState) + maxWeight := f.weights[children[0]] for i := 1; i < len(children); i++ { - weight := f.getWeight(children[i], filteredIndicies, justificationState) + weight := f.weights[children[i]] // Lexicographical order is king. if weight >= maxWeight { f.headHash = children[i] @@ -81,10 +123,10 @@ func (f *ForkChoiceStore) filterValidatorSetForAttestationScores(c *checkpointSt if !readFromBitset(c.actives, validatorIndex) || readFromBitset(c.slasheds, validatorIndex) { continue } - if _, hasLatestMessage := f.latestMessages[uint64(validatorIndex)]; !hasLatestMessage { + if _, hasLatestMessage := f.getLatestMessage(uint64(validatorIndex)); !hasLatestMessage { continue } - if _, isUnequivocating := f.equivocatingIndicies[uint64(validatorIndex)]; isUnequivocating { + if f.isUnequivocating(uint64(validatorIndex)) { continue } filtered = append(filtered, uint64(validatorIndex)) diff --git a/cl/phase1/forkchoice/interface.go b/cl/phase1/forkchoice/interface.go index 438db97f32c..7da33e5acbf 100644 --- a/cl/phase1/forkchoice/interface.go +++ b/cl/phase1/forkchoice/interface.go @@ -40,6 +40,7 @@ type ForkChoiceStorageReader interface { GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) + ForkNodes() []ForkNode } type ForkChoiceStorageWriter interface { diff --git a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go index ed4b0ce674c..ed734b4b8a7 100644 --- a/cl/phase1/forkchoice/on_attestation.go +++ b/cl/phase1/forkchoice/on_attestation.go @@ -85,20 +85,63 @@ func (f *ForkChoiceStore) scheduleAttestationForLaterProcessing(attestation *sol }() } +func (f *ForkChoiceStore) setLatestMessage(index uint64, message LatestMessage) { + if index >= uint64(len(f.latestMessages)) { + if index >= uint64(cap(f.latestMessages)) { + tmp := make([]LatestMessage, index+1, index*2) + copy(tmp, f.latestMessages) + f.latestMessages = tmp + } + f.latestMessages = f.latestMessages[:index+1] + } + f.latestMessages[index] = message +} + +func (f *ForkChoiceStore) getLatestMessage(validatorIndex uint64) (LatestMessage, bool) { + if validatorIndex >= uint64(len(f.latestMessages)) || f.latestMessages[validatorIndex] == (LatestMessage{}) { + return LatestMessage{}, false + } + return f.latestMessages[validatorIndex], true +} + +func (f *ForkChoiceStore) isUnequivocating(validatorIndex uint64) bool { + // f.equivocatingIndicies is a bitlist + index := int(validatorIndex) / 8 + if index >= len(f.equivocatingIndicies) { + return false + } + subIndex := int(validatorIndex) % 8 + return f.equivocatingIndicies[index]&(1<= len(f.equivocatingIndicies) { + if index >= cap(f.equivocatingIndicies) { + tmp := make([]byte, index+1, index*2) + copy(tmp, f.equivocatingIndicies) + f.equivocatingIndicies = tmp + } + f.equivocatingIndicies = f.equivocatingIndicies[:index+1] + } + subIndex := int(validatorIndex) % 8 + f.equivocatingIndicies[index] |= 1 << uint(subIndex) +} + func (f *ForkChoiceStore) processAttestingIndicies(attestation *solid.Attestation, indicies []uint64) { beaconBlockRoot := attestation.AttestantionData().BeaconBlockRoot() target := attestation.AttestantionData().Target() for _, index := range indicies { - if _, ok := f.equivocatingIndicies[index]; ok { + if f.isUnequivocating(index) { continue } - validatorMessage, has := f.latestMessages[index] + validatorMessage, has := 
f.getLatestMessage(index) if !has || target.Epoch() > validatorMessage.Epoch { - f.latestMessages[index] = &LatestMessage{ + f.setLatestMessage(index, LatestMessage{ Epoch: target.Epoch(), Root: beaconBlockRoot, - } + }) } } } diff --git a/cl/phase1/forkchoice/on_attester_slashing.go b/cl/phase1/forkchoice/on_attester_slashing.go index 1c4ea9d5dc3..4305ed58d08 100644 --- a/cl/phase1/forkchoice/on_attester_slashing.go +++ b/cl/phase1/forkchoice/on_attester_slashing.go @@ -86,7 +86,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS defer f.mu.Unlock() var anySlashed bool for _, index := range solid.IntersectionOfSortedSets(attestation1.AttestingIndices, attestation2.AttestingIndices) { - f.equivocatingIndicies[index] = struct{}{} + f.setUnequivocating(index) if !anySlashed { v, err := s.ValidatorForValidatorIndex(int(index)) if err != nil { diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go index 06b28c5e772..627e16ac26f 100644 --- a/cl/phase1/forkchoice/on_block.go +++ b/cl/phase1/forkchoice/on_block.go @@ -66,6 +66,9 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload, if block.Block.Slot > f.highestSeen { f.highestSeen = block.Block.Slot } + // Remove the parent from the head set + delete(f.headSet, block.Block.ParentRoot) + f.headSet[blockRoot] = struct{}{} // Add proposer score boost if the block is timely timeIntoSlot := (f.time - f.genesisTime) % lastProcessedState.BeaconConfig().SecondsPerSlot isBeforeAttestingInterval := timeIntoSlot < f.beaconCfg.SecondsPerSlot/f.beaconCfg.IntervalsPerSlot diff --git a/cl/phase1/forkchoice/utils.go b/cl/phase1/forkchoice/utils.go index b3eaca58da7..f13aee3dac4 100644 --- a/cl/phase1/forkchoice/utils.go +++ b/cl/phase1/forkchoice/utils.go @@ -41,6 +41,7 @@ func (f *ForkChoiceStore) onNewFinalized(newFinalized solid.Checkpoint) { for k, children := range f.childrens { if children.parentSlot <= newFinalized.Epoch()*f.beaconCfg.SlotsPerEpoch { delete(f.childrens, k) + delete(f.headSet, k) continue } } diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go index 58cd687bd98..b2d57dd01a7 100644 --- a/cl/phase1/stages/clstages.go +++ b/cl/phase1/stages/clstages.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/clstages" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" "github.com/ledgerwatch/erigon/cl/persistence/db_config" @@ -403,6 +404,13 @@ func ConsensusClStages(ctx context.Context, cfg.rpc.BanPeer(blocks.Peer) continue MainLoop } + block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool { + if err = cfg.forkChoice.OnAttestation(a, true); err != nil { + log.Debug("bad attestation received", "err", err) + } + return true + }) + if block.Block.Slot >= args.targetSlot { break MainLoop } @@ -425,17 +433,6 @@ func ConsensusClStages(ctx context.Context, }, ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - // TODO: we need to get the last run block in order to process attestations here - ////////block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool { - //////// if err = g.forkChoice.OnAttestation(a, true); err != nil { - //////// return false - //////// } - //////// return true - ////////}) - ////////if err != nil { - //////// return 
err - ////////} - // Now check the head headRoot, headSlot, err := cfg.forkChoice.GetHead() if err != nil {