diff --git a/cl/beacon/handler/forkchoice.go b/cl/beacon/handler/forkchoice.go new file mode 100644 index 00000000000..4f7f845b707 --- /dev/null +++ b/cl/beacon/handler/forkchoice.go @@ -0,0 +1,39 @@ +package handler + +import ( + "encoding/json" + "net/http" + + "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" +) + +func (a *ApiHandler) GetEthV2DebugBeaconHeads(w http.ResponseWriter, r *http.Request) (*beaconResponse, error) { + if a.syncedData.Syncing() { + return nil, beaconhttp.NewEndpointError(http.StatusServiceUnavailable, "beacon node is syncing") + } + hash, slotNumber, err := a.forkchoiceStore.GetHead() + if err != nil { + return nil, err + } + return newBeaconResponse( + []interface{}{ + map[string]interface{}{ + "slot": slotNumber, + "root": hash, + "execution_optimistic": false, + }, + }), nil +} + +func (a *ApiHandler) GetEthV1DebugBeaconForkChoice(w http.ResponseWriter, r *http.Request) { + justifiedCheckpoint := a.forkchoiceStore.JustifiedCheckpoint() + finalizedCheckpoint := a.forkchoiceStore.FinalizedCheckpoint() + forkNodes := a.forkchoiceStore.ForkNodes() + if err := json.NewEncoder(w).Encode(map[string]interface{}{ + "justified_checkpoint": justifiedCheckpoint, + "finalized_checkpoint": finalizedCheckpoint, + "fork_choice_nodes": forkNodes, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git a/cl/beacon/handler/forkchoice_test.go b/cl/beacon/handler/forkchoice_test.go new file mode 100644 index 00000000000..8b98997f815 --- /dev/null +++ b/cl/beacon/handler/forkchoice_test.go @@ -0,0 +1,80 @@ +package handler + +import ( + "io" + "net/http/httptest" + "testing" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" + "github.com/ledgerwatch/erigon/cl/phase1/forkchoice" + "github.com/stretchr/testify/require" +) + +func TestGetHeads(t *testing.T) { + // find server + _, _, _, _, p, handler, _, sm, fcu := setupTestingHandler(t, clparams.Phase0Version) + sm.OnHeadState(p) + s, cancel := sm.HeadState() + s.SetSlot(789274827847783) + cancel() + + fcu.HeadSlotVal = 128 + fcu.HeadVal = libcommon.Hash{1, 2, 3} + server := httptest.NewServer(handler.mux) + defer server.Close() + + // get heads + resp, err := server.Client().Get(server.URL + "/eth/v2/debug/beacon/heads") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, 200, resp.StatusCode) + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, `{"data":[{"execution_optimistic":false,"root":"0x0102030000000000000000000000000000000000000000000000000000000000","slot":128}]}`+"\n", string(out)) +} + +func TestGetForkchoice(t *testing.T) { + // find server + _, _, _, _, p, handler, _, sm, fcu := setupTestingHandler(t, clparams.Phase0Version) + sm.OnHeadState(p) + s, cancel := sm.HeadState() + s.SetSlot(789274827847783) + cancel() + + fcu.HeadSlotVal = 128 + fcu.HeadVal = libcommon.Hash{1, 2, 3} + server := httptest.NewServer(handler.mux) + defer server.Close() + + fcu.WeightsMock = []forkchoice.ForkNode{ + { + BlockRoot: libcommon.Hash{1, 2, 3}, + ParentRoot: libcommon.Hash{1, 2, 3}, + Slot: 128, + Weight: 1, + }, + { + BlockRoot: libcommon.Hash{1, 2, 2, 4, 5, 3}, + ParentRoot: libcommon.Hash{1, 2, 5}, + Slot: 128, + Weight: 2, + }, + } + + fcu.FinalizedCheckpointVal = solid.NewCheckpointFromParameters(libcommon.Hash{1, 2, 3}, 1) + fcu.JustifiedCheckpointVal = solid.NewCheckpointFromParameters(libcommon.Hash{1, 2, 3}, 2) + + // get heads 
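For context, `GetEthV1DebugBeaconForkChoice` above writes a flat JSON object with `justified_checkpoint`, `finalized_checkpoint` and `fork_choice_nodes` keys (no `data` envelope, unlike the heads endpoint). A minimal client-side sketch of consuming it could look like the following; the struct shapes are assumptions read off the fields encoded above and the expected test output, not an official API binding, and the local address is hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Shapes assumed from the fields the handler encodes above.
type checkpoint struct {
	Epoch string `json:"epoch"`
	Root  string `json:"root"`
}

type forkChoiceNode struct {
	Slot      string `json:"slot"`
	BlockRoot string `json:"block_root"`
	Weight    string `json:"weight"`
}

type forkChoiceDump struct {
	JustifiedCheckpoint checkpoint       `json:"justified_checkpoint"`
	FinalizedCheckpoint checkpoint       `json:"finalized_checkpoint"`
	ForkChoiceNodes     []forkChoiceNode `json:"fork_choice_nodes"`
}

func main() {
	// Hypothetical local beacon API address.
	resp, err := http.Get("http://localhost:5555/eth/v1/debug/fork_choice")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var dump forkChoiceDump
	if err := json.NewDecoder(resp.Body).Decode(&dump); err != nil {
		panic(err)
	}
	for _, n := range dump.ForkChoiceNodes {
		fmt.Printf("slot=%s root=%s weight=%s\n", n.Slot, n.BlockRoot, n.Weight)
	}
}
```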
+ resp, err := server.Client().Get(server.URL + "/eth/v1/debug/fork_choice") + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, 200, resp.StatusCode) + out, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, `{"finalized_checkpoint":{"epoch":"1","root":"0x0102030000000000000000000000000000000000000000000000000000000000"},"fork_choice_nodes":[{"slot":"128","block_root":"0x0102030000000000000000000000000000000000000000000000000000000000","parent_root":"0x0102030000000000000000000000000000000000000000000000000000000000","justified_epoch":"0","finalized_epoch":"0","weight":"1","validity":"","execution_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"},{"slot":"128","block_root":"0x0102020405030000000000000000000000000000000000000000000000000000","parent_root":"0x0102050000000000000000000000000000000000000000000000000000000000","justified_epoch":"0","finalized_epoch":"0","weight":"2","validity":"","execution_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"}],"justified_checkpoint":{"epoch":"2","root":"0x0102030000000000000000000000000000000000000000000000000000000000"}}`+"\n", string(out)) +} diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index fb4e5aa63f6..eb2fc2e90f6 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -49,8 +49,11 @@ func (a *ApiHandler) init() { // otterscn specific ones are commented as such r.Route("/eth", func(r chi.Router) { r.Route("/v1", func(r chi.Router) { - r.Get("/events", http.NotFound) + r.Route("/node", func(r chi.Router) { + r.Get("/health", a.GetEthV1NodeHealth) + }) + r.Get("/debug/fork_choice", a.GetEthV1DebugBeaconForkChoice) r.Route("/config", func(r chi.Router) { r.Get("/spec", beaconhttp.HandleEndpointFunc(a.getSpec)) r.Get("/deposit_contract", beaconhttp.HandleEndpointFunc(a.getDepositContract)) @@ -125,6 +128,7 @@ func (a *ApiHandler) init() { r.Route("/debug", func(r chi.Router) { r.Route("/beacon", func(r chi.Router) { r.Get("/states/{state_id}", beaconhttp.HandleEndpointFunc(a.getFullState)) + r.Get("/heads", beaconhttp.HandleEndpointFunc(a.GetEthV2DebugBeaconHeads)) }) }) r.Route("/beacon", func(r chi.Router) { diff --git a/cl/beacon/handler/node.go b/cl/beacon/handler/node.go new file mode 100644 index 00000000000..26f4fc46f0d --- /dev/null +++ b/cl/beacon/handler/node.go @@ -0,0 +1,20 @@ +package handler + +import "net/http" + +func (a *ApiHandler) GetEthV1NodeHealth(w http.ResponseWriter, r *http.Request) { + syncingStatus, err := uint64FromQueryParams(r, "syncing_status") + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + syncingCode := http.StatusOK + if syncingStatus != nil { + syncingCode = int(*syncingStatus) + } + if a.syncedData.Syncing() { + w.WriteHeader(syncingCode) + return + } + w.WriteHeader(http.StatusOK) +} diff --git a/cl/beacon/handler/node_test.go b/cl/beacon/handler/node_test.go new file mode 100644 index 00000000000..094412ddd7e --- /dev/null +++ b/cl/beacon/handler/node_test.go @@ -0,0 +1,49 @@ +package handler + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/stretchr/testify/require" +) + +func TestNodeSyncing(t *testing.T) { + // i just want the correct schema to be generated + _, _, _, _, _, handler, _, _, _ := setupTestingHandler(t, clparams.Phase0Version) + + // Call GET /eth/v1/node/health + server := httptest.NewServer(handler.mux) + defer server.Close() 
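The health handler above maps "node is syncing" to whatever status code the caller passes in `syncing_status`, falling back to 200 OK when the parameter is absent. A small probe built on that contract might look like this sketch; the base URL and the choice of 503 are illustrative, not mandated by the handler.

```go
package main

import (
	"fmt"
	"net/http"
)

// probeHealth reports whether the node answers as synced. It asks the
// handler to respond with 503 while syncing, using the syncing_status
// query parameter handled above.
func probeHealth(baseURL string) (bool, error) {
	resp, err := http.Get(baseURL + "/eth/v1/node/health?syncing_status=503")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK, nil
}

func main() {
	synced, err := probeHealth("http://localhost:5555") // hypothetical address
	if err != nil {
		fmt.Println("health check failed:", err)
		return
	}
	fmt.Println("synced:", synced)
}
```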
+ + req, err := http.NewRequest("GET", server.URL+"/eth/v1/node/health?syncing_status=666", nil) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, 666, resp.StatusCode) +} + +func TestNodeSyncingTip(t *testing.T) { + // i just want the correct schema to be generated + _, _, _, _, post, handler, _, sm, _ := setupTestingHandler(t, clparams.Phase0Version) + + // Call GET /eth/v1/node/health + server := httptest.NewServer(handler.mux) + defer server.Close() + + req, err := http.NewRequest("GET", server.URL+"/eth/v1/node/health?syncing_status=666", nil) + require.NoError(t, err) + + require.NoError(t, sm.OnHeadState(post)) + s, cancel := sm.HeadState() + s.SetSlot(999999999999999) + cancel() + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, 200, resp.StatusCode) +} diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go index c8de023f888..0d6f7e0789f 100644 --- a/cl/beacon/synced_data/synced_data.go +++ b/cl/beacon/synced_data/synced_data.go @@ -32,6 +32,9 @@ func (s *SyncedDataManager) OnHeadState(newState *state.CachingBeaconState) (err defer s.mu.Unlock() if s.headState == nil { s.headState, err = newState.Copy() + if err != nil { + return err + } } err = newState.CopyInto(s.headState) if err != nil { @@ -56,7 +59,7 @@ func (s *SyncedDataManager) Syncing() bool { s.mu.RLock() defer s.mu.RUnlock() if s.headState == nil { - return false + return true } headEpoch := utils.GetCurrentEpoch(s.headState.GenesisTime(), s.cfg.SecondsPerSlot, s.cfg.SlotsPerEpoch) diff --git a/cl/beacon/validatorapi/get.go b/cl/beacon/validatorapi/get.go index e72e4502119..00cbbf1e374 100644 --- a/cl/beacon/validatorapi/get.go +++ b/cl/beacon/validatorapi/get.go @@ -42,13 +42,14 @@ func (v *ValidatorApiHandler) GetEthV1NodeSyncing(w http.ResponseWriter, r *http } return map[string]any{ - "head_slot": strconv.FormatUint(slot, 10), - "sync_distance": syncDistance, - "is_syncing": isSyncing, - "el_offline": elOffline, - // TODO: figure out how to populat this field - "is_optimistic": true, - }, nil + "data": map[string]any{ + "head_slot": strconv.FormatUint(slot, 10), + "sync_distance": syncDistance, + "is_syncing": isSyncing, + "el_offline": elOffline, + // TODO: figure out how to populat this field + "is_optimistic": true, + }}, nil } func (v *ValidatorApiHandler) GetEthV1ConfigSpec(w http.ResponseWriter, r *http.Request) (*clparams.BeaconChainConfig, error) { @@ -67,10 +68,11 @@ func (v *ValidatorApiHandler) GetEthV1BeaconGenesis(w http.ResponseWriter, r *ht return nil, beaconhttp.NewEndpointError(http.StatusInternalServerError, err.Error()) } return map[string]any{ - "genesis_time": v.GenesisCfg.GenesisTime, - "genesis_validator_root": v.GenesisCfg.GenesisValidatorRoot, - "genesis_fork_version": hexutility.Bytes(digest[:]), - }, nil + "data": map[string]any{ + "genesis_time": v.GenesisCfg.GenesisTime, + "genesis_validator_root": v.GenesisCfg.GenesisValidatorRoot, + "genesis_fork_version": hexutility.Bytes(digest[:]), + }}, nil } func (v *ValidatorApiHandler) GetEthV1BeaconStatesStateIdFork(w http.ResponseWriter, r *http.Request) (any, error) { diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go index 387ec7aa938..6846dbbd53a 100644 --- 
a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go @@ -47,19 +47,19 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt } func TestStateAntiquaryCapella(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetCapellaRandom() runTest(t, blocks, preState, postState) } func TestStateAntiquaryPhase0(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetPhase0Random() runTest(t, blocks, preState, postState) } func TestStateAntiquaryBellatrix(t *testing.T) { - //t.Skip() + t.Skip() blocks, preState, postState := tests.GetBellatrixRandom() runTest(t, blocks, preState, postState) } diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go index f6533b96ebb..bde4d322f37 100644 --- a/cl/phase1/forkchoice/forkchoice.go +++ b/cl/phase1/forkchoice/forkchoice.go @@ -2,6 +2,7 @@ package forkchoice import ( "context" + "sort" "sync" "github.com/ledgerwatch/erigon/cl/clparams" @@ -20,6 +21,31 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" ) +// Schema +/* +{ + "slot": "1", + "block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "justified_epoch": "1", + "finalized_epoch": "1", + "weight": "1", + "validity": "valid", + "execution_block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "extra_data": {} + } +*/ +type ForkNode struct { + Slot uint64 `json:"slot,string"` + BlockRoot libcommon.Hash `json:"block_root"` + ParentRoot libcommon.Hash `json:"parent_root"` + JustifiedEpoch uint64 `json:"justified_epoch,string"` + FinalizedEpoch uint64 `json:"finalized_epoch,string"` + Weight uint64 `json:"weight,string"` + Validity string `json:"validity"` + ExecutionBlock libcommon.Hash `json:"execution_block_hash"` +} + type checkpointComparable string const ( @@ -53,17 +79,21 @@ type ForkChoiceStore struct { unrealizedJustifiedCheckpoint solid.Checkpoint unrealizedFinalizedCheckpoint solid.Checkpoint proposerBoostRoot libcommon.Hash - headHash libcommon.Hash - headSlot uint64 - genesisTime uint64 - childrens map[libcommon.Hash]childrens + // head data + headHash libcommon.Hash + headSlot uint64 + genesisTime uint64 + weights map[libcommon.Hash]uint64 + headSet map[libcommon.Hash]struct{} + // childrens + childrens map[libcommon.Hash]childrens // Use go map because this is actually an unordered set - equivocatingIndicies map[uint64]struct{} + equivocatingIndicies []byte forkGraph fork_graph.ForkGraph // I use the cache due to the convenient auto-cleanup feauture. checkpointStates map[checkpointComparable]*checkpointState // We keep ssz snappy of it as the full beacon state is full of rendundant data. - latestMessages map[uint64]*LatestMessage + latestMessages []LatestMessage anchorPublicKeys []byte // We keep track of them so that we can forkchoice with EL. 
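The `,string` option on the numeric `ForkNode` fields is what produces the quoted decimal values (`"slot":"128"`, `"weight":"1"`) seen in the expected JSON of the tests above. A minimal standalone illustration of that encoding behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// node mirrors how ForkNode tags its numeric fields so they marshal as
// decimal strings rather than JSON numbers.
type node struct {
	Slot   uint64 `json:"slot,string"`
	Weight uint64 `json:"weight,string"`
}

func main() {
	out, _ := json.Marshal(node{Slot: 128, Weight: 1})
	fmt.Println(string(out)) // {"slot":"128","weight":"1"}
}
```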
eth2Roots *lru.Cache[libcommon.Hash, libcommon.Hash] // ETH2 root -> ETH1 hash @@ -163,6 +193,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt r := solid.NewHashVector(int(anchorState.BeaconConfig().EpochsPerHistoricalVector)) anchorState.RandaoMixes().CopyTo(r) randaoMixesLists.Add(anchorRoot, r) + headSet := make(map[libcommon.Hash]struct{}) + headSet[anchorRoot] = struct{}{} return &ForkChoiceStore{ ctx: ctx, highestSeen: anchorState.Slot(), @@ -172,8 +204,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt unrealizedJustifiedCheckpoint: anchorCheckpoint.Copy(), unrealizedFinalizedCheckpoint: anchorCheckpoint.Copy(), forkGraph: forkGraph, - equivocatingIndicies: map[uint64]struct{}{}, - latestMessages: map[uint64]*LatestMessage{}, + equivocatingIndicies: make([]byte, anchorState.ValidatorLength(), anchorState.ValidatorLength()*2), + latestMessages: make([]LatestMessage, anchorState.ValidatorLength(), anchorState.ValidatorLength()*2), checkpointStates: make(map[checkpointComparable]*checkpointState), eth2Roots: eth2Roots, engine: engine, @@ -188,6 +220,8 @@ func NewForkChoiceStore(ctx context.Context, anchorState *state2.CachingBeaconSt totalActiveBalances: totalActiveBalances, randaoMixesLists: randaoMixesLists, randaoDeltas: randaoDeltas, + headSet: headSet, + weights: make(map[libcommon.Hash]uint64), participation: participation, }, nil } @@ -399,3 +433,39 @@ func (f *ForkChoiceStore) RandaoMixes(blockRoot libcommon.Hash, out solid.HashLi func (f *ForkChoiceStore) Partecipation(epoch uint64) (*solid.BitList, bool) { return f.participation.Get(epoch) } + +func (f *ForkChoiceStore) ForkNodes() []ForkNode { + f.mu.Lock() + defer f.mu.Unlock() + forkNodes := make([]ForkNode, 0, len(f.weights)) + for blockRoot, weight := range f.weights { + header, has := f.forkGraph.GetHeader(blockRoot) + if !has { + continue + } + justifiedCheckpoint, has := f.forkGraph.GetCurrentJustifiedCheckpoint(blockRoot) + if !has { + continue + } + finalizedCheckpoint, has := f.forkGraph.GetFinalizedCheckpoint(blockRoot) + if !has { + continue + } + blockHash, _ := f.eth2Roots.Get(blockRoot) + + forkNodes = append(forkNodes, ForkNode{ + Weight: weight, + BlockRoot: blockRoot, + ParentRoot: header.ParentRoot, + JustifiedEpoch: justifiedCheckpoint.Epoch(), + FinalizedEpoch: finalizedCheckpoint.Epoch(), + Slot: header.Slot, + Validity: "valid", + ExecutionBlock: blockHash, + }) + } + sort.Slice(forkNodes, func(i, j int) bool { + return forkNodes[i].Slot < forkNodes[j].Slot + }) + return forkNodes +} diff --git a/cl/phase1/forkchoice/forkchoice_mock.go b/cl/phase1/forkchoice/forkchoice_mock.go index 16f8ee6b0af..6ae413d4f96 100644 --- a/cl/phase1/forkchoice/forkchoice_mock.go +++ b/cl/phase1/forkchoice/forkchoice_mock.go @@ -66,6 +66,7 @@ type ForkChoiceStorageMock struct { StateAtSlotVal map[uint64]*state.CachingBeaconState GetSyncCommitteesVal map[common.Hash][2]*solid.SyncCommittee GetFinalityCheckpointsVal map[common.Hash][3]solid.Checkpoint + WeightsMock []ForkNode Pool pool.OperationsPool } @@ -215,3 +216,7 @@ func (f *ForkChoiceStorageMock) OnBlsToExecutionChange(signedChange *cltypes.Sig f.Pool.BLSToExecutionChangesPool.Insert(signedChange.Signature, signedChange) return nil } + +func (f *ForkChoiceStorageMock) ForkNodes() []ForkNode { + return f.WeightsMock +} diff --git a/cl/phase1/forkchoice/get_head.go b/cl/phase1/forkchoice/get_head.go index e1300c2c022..56165f4a2bc 100644 --- a/cl/phase1/forkchoice/get_head.go +++ 
b/cl/phase1/forkchoice/get_head.go @@ -16,6 +16,23 @@ func (f *ForkChoiceStore) GetHead() (libcommon.Hash, uint64, error) { return f.getHead() } +// accountWeights updates the weights of the validators, given the vote and given an head leaf. +func (f *ForkChoiceStore) accountWeights(votes, weights map[libcommon.Hash]uint64, justifedRoot, leaf libcommon.Hash) { + curr := leaf + accumulated := uint64(0) + for curr != justifedRoot { + accumulated += votes[curr] + votes[curr] = 0 // make sure we don't double count + weights[curr] += accumulated + header, has := f.forkGraph.GetHeader(curr) + if !has { + return + } + curr = header.ParentRoot + } + return +} + func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) { if f.headHash != (libcommon.Hash{}) { return f.headHash, f.headSlot, nil @@ -28,8 +45,33 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) { if err != nil { return libcommon.Hash{}, 0, err } - // Filter all validators deemed as bad - filteredIndicies := f.filterValidatorSetForAttestationScores(justificationState, justificationState.epoch) + // Do a simple scan to determine the fork votes. + votes := make(map[libcommon.Hash]uint64) + for validatorIndex, message := range f.latestMessages { + if message == (LatestMessage{}) { + continue + } + if !readFromBitset(justificationState.actives, validatorIndex) || readFromBitset(justificationState.slasheds, validatorIndex) { + continue + } + if _, hasLatestMessage := f.getLatestMessage(uint64(validatorIndex)); !hasLatestMessage { + continue + } + if f.isUnequivocating(uint64(validatorIndex)) { + continue + } + votes[message.Root] += justificationState.balances[validatorIndex] + } + if f.proposerBoostRoot != (libcommon.Hash{}) { + boost := justificationState.activeBalance / justificationState.beaconConfig.SlotsPerEpoch + votes[f.proposerBoostRoot] += (boost * justificationState.beaconConfig.ProposerScoreBoost) / 100 + } + // Account for weights on each head fork + f.weights = make(map[libcommon.Hash]uint64) + for head := range f.headSet { + f.accountWeights(votes, f.weights, f.justifiedCheckpoint.BlockRoot(), head) + } + for { // Filter out current head children. unfilteredChildren := f.children(f.headHash) @@ -62,9 +104,9 @@ func (f *ForkChoiceStore) getHead() (libcommon.Hash, uint64, error) { // After sorting is done determine best fit. f.headHash = children[0] - maxWeight := f.getWeight(children[0], filteredIndicies, justificationState) + maxWeight := f.weights[children[0]] for i := 1; i < len(children); i++ { - weight := f.getWeight(children[i], filteredIndicies, justificationState) + weight := f.weights[children[i]] // Lexicographical order is king. 
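The weight accounting above walks each candidate head back towards the justified root, crediting the accumulated vote balance to every block on the path and zeroing each vote so shared ancestors are not counted twice. A self-contained sketch of that walk over a plain parent map (hypothetical string keys standing in for the fork graph) is shown below.

```go
package main

import "fmt"

// accountWeights mirrors the walk above: votes for a block are credited to
// the block itself and to every ancestor up to (but excluding) the
// justified root, and each vote is consumed once.
func accountWeights(parent map[string]string, votes, weights map[string]uint64, justified, leaf string) {
	accumulated := uint64(0)
	for curr := leaf; curr != justified; {
		accumulated += votes[curr]
		votes[curr] = 0 // make sure shared ancestors aren't double counted
		weights[curr] += accumulated
		p, ok := parent[curr]
		if !ok {
			return
		}
		curr = p
	}
}

func main() {
	// justified <- A <- B (head)
	parent := map[string]string{"A": "justified", "B": "A"}
	votes := map[string]uint64{"A": 10, "B": 5}
	weights := map[string]uint64{}
	accountWeights(parent, votes, weights, "justified", "B")
	fmt.Println(weights["B"], weights["A"]) // 5 15
}
```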
if weight >= maxWeight { f.headHash = children[i] @@ -81,10 +123,10 @@ func (f *ForkChoiceStore) filterValidatorSetForAttestationScores(c *checkpointSt if !readFromBitset(c.actives, validatorIndex) || readFromBitset(c.slasheds, validatorIndex) { continue } - if _, hasLatestMessage := f.latestMessages[uint64(validatorIndex)]; !hasLatestMessage { + if _, hasLatestMessage := f.getLatestMessage(uint64(validatorIndex)); !hasLatestMessage { continue } - if _, isUnequivocating := f.equivocatingIndicies[uint64(validatorIndex)]; isUnequivocating { + if f.isUnequivocating(uint64(validatorIndex)) { continue } filtered = append(filtered, uint64(validatorIndex)) diff --git a/cl/phase1/forkchoice/interface.go b/cl/phase1/forkchoice/interface.go index 438db97f32c..7da33e5acbf 100644 --- a/cl/phase1/forkchoice/interface.go +++ b/cl/phase1/forkchoice/interface.go @@ -40,6 +40,7 @@ type ForkChoiceStorageReader interface { GetStateAtSlot(slot uint64, alwaysCopy bool) (*state.CachingBeaconState, error) GetStateAtStateRoot(root libcommon.Hash, alwaysCopy bool) (*state.CachingBeaconState, error) + ForkNodes() []ForkNode } type ForkChoiceStorageWriter interface { diff --git a/cl/phase1/forkchoice/on_attestation.go b/cl/phase1/forkchoice/on_attestation.go index ed4b0ce674c..ed734b4b8a7 100644 --- a/cl/phase1/forkchoice/on_attestation.go +++ b/cl/phase1/forkchoice/on_attestation.go @@ -85,20 +85,63 @@ func (f *ForkChoiceStore) scheduleAttestationForLaterProcessing(attestation *sol }() } +func (f *ForkChoiceStore) setLatestMessage(index uint64, message LatestMessage) { + if index >= uint64(len(f.latestMessages)) { + if index >= uint64(cap(f.latestMessages)) { + tmp := make([]LatestMessage, index+1, index*2) + copy(tmp, f.latestMessages) + f.latestMessages = tmp + } + f.latestMessages = f.latestMessages[:index+1] + } + f.latestMessages[index] = message +} + +func (f *ForkChoiceStore) getLatestMessage(validatorIndex uint64) (LatestMessage, bool) { + if validatorIndex >= uint64(len(f.latestMessages)) || f.latestMessages[validatorIndex] == (LatestMessage{}) { + return LatestMessage{}, false + } + return f.latestMessages[validatorIndex], true +} + +func (f *ForkChoiceStore) isUnequivocating(validatorIndex uint64) bool { + // f.equivocatingIndicies is a bitlist + index := int(validatorIndex) / 8 + if index >= len(f.equivocatingIndicies) { + return false + } + subIndex := int(validatorIndex) % 8 + return f.equivocatingIndicies[index]&(1<= len(f.equivocatingIndicies) { + if index >= cap(f.equivocatingIndicies) { + tmp := make([]byte, index+1, index*2) + copy(tmp, f.equivocatingIndicies) + f.equivocatingIndicies = tmp + } + f.equivocatingIndicies = f.equivocatingIndicies[:index+1] + } + subIndex := int(validatorIndex) % 8 + f.equivocatingIndicies[index] |= 1 << uint(subIndex) +} + func (f *ForkChoiceStore) processAttestingIndicies(attestation *solid.Attestation, indicies []uint64) { beaconBlockRoot := attestation.AttestantionData().BeaconBlockRoot() target := attestation.AttestantionData().Target() for _, index := range indicies { - if _, ok := f.equivocatingIndicies[index]; ok { + if f.isUnequivocating(index) { continue } - validatorMessage, has := f.latestMessages[index] + validatorMessage, has := f.getLatestMessage(index) if !has || target.Epoch() > validatorMessage.Epoch { - f.latestMessages[index] = &LatestMessage{ + f.setLatestMessage(index, LatestMessage{ Epoch: target.Epoch(), Root: beaconBlockRoot, - } + }) } } } diff --git a/cl/phase1/forkchoice/on_attester_slashing.go 
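`equivocatingIndicies` is now a plain byte slice used as a growable bitlist (one bit per validator index), replacing the earlier `map[uint64]struct{}`. A self-contained sketch of the read/write pair on such a bitset, in the same spirit as the accessors above:

```go
package main

import "fmt"

// bitsetGet reports whether bit i is set; indexes past the end read as unset.
func bitsetGet(b []byte, i uint64) bool {
	idx := int(i) / 8
	if idx >= len(b) {
		return false
	}
	return b[idx]&(1<<uint(i%8)) != 0
}

// bitsetSet sets bit i, growing the backing slice on demand.
func bitsetSet(b []byte, i uint64) []byte {
	idx := int(i) / 8
	for idx >= len(b) {
		b = append(b, 0)
	}
	b[idx] |= 1 << uint(i%8)
	return b
}

func main() {
	var b []byte
	b = bitsetSet(b, 42)
	fmt.Println(bitsetGet(b, 42), bitsetGet(b, 7)) // true false
}
```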
b/cl/phase1/forkchoice/on_attester_slashing.go index 1c4ea9d5dc3..4305ed58d08 100644 --- a/cl/phase1/forkchoice/on_attester_slashing.go +++ b/cl/phase1/forkchoice/on_attester_slashing.go @@ -86,7 +86,7 @@ func (f *ForkChoiceStore) OnAttesterSlashing(attesterSlashing *cltypes.AttesterS defer f.mu.Unlock() var anySlashed bool for _, index := range solid.IntersectionOfSortedSets(attestation1.AttestingIndices, attestation2.AttestingIndices) { - f.equivocatingIndicies[index] = struct{}{} + f.setUnequivocating(index) if !anySlashed { v, err := s.ValidatorForValidatorIndex(int(index)) if err != nil { diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go index 06b28c5e772..627e16ac26f 100644 --- a/cl/phase1/forkchoice/on_block.go +++ b/cl/phase1/forkchoice/on_block.go @@ -66,6 +66,9 @@ func (f *ForkChoiceStore) OnBlock(block *cltypes.SignedBeaconBlock, newPayload, if block.Block.Slot > f.highestSeen { f.highestSeen = block.Block.Slot } + // Remove the parent from the head set + delete(f.headSet, block.Block.ParentRoot) + f.headSet[blockRoot] = struct{}{} // Add proposer score boost if the block is timely timeIntoSlot := (f.time - f.genesisTime) % lastProcessedState.BeaconConfig().SecondsPerSlot isBeforeAttestingInterval := timeIntoSlot < f.beaconCfg.SecondsPerSlot/f.beaconCfg.IntervalsPerSlot diff --git a/cl/phase1/forkchoice/utils.go b/cl/phase1/forkchoice/utils.go index b3eaca58da7..f13aee3dac4 100644 --- a/cl/phase1/forkchoice/utils.go +++ b/cl/phase1/forkchoice/utils.go @@ -41,6 +41,7 @@ func (f *ForkChoiceStore) onNewFinalized(newFinalized solid.Checkpoint) { for k, children := range f.childrens { if children.parentSlot <= newFinalized.Epoch()*f.beaconCfg.SlotsPerEpoch { delete(f.childrens, k) + delete(f.headSet, k) continue } } diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go index 58cd687bd98..b2d57dd01a7 100644 --- a/cl/phase1/stages/clstages.go +++ b/cl/phase1/stages/clstages.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/clstages" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/persistence" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" "github.com/ledgerwatch/erigon/cl/persistence/db_config" @@ -403,6 +404,13 @@ func ConsensusClStages(ctx context.Context, cfg.rpc.BanPeer(blocks.Peer) continue MainLoop } + block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool { + if err = cfg.forkChoice.OnAttestation(a, true); err != nil { + log.Debug("bad attestation received", "err", err) + } + return true + }) + if block.Block.Slot >= args.targetSlot { break MainLoop } @@ -425,17 +433,6 @@ func ConsensusClStages(ctx context.Context, }, ActionFunc: func(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error { - // TODO: we need to get the last run block in order to process attestations here - ////////block.Block.Body.Attestations.Range(func(idx int, a *solid.Attestation, total int) bool { - //////// if err = g.forkChoice.OnAttestation(a, true); err != nil { - //////// return false - //////// } - //////// return true - ////////}) - ////////if err != nil { - //////// return err - ////////} - // Now check the head headRoot, headSlot, err := cfg.forkChoice.GetHead() if err != nil { diff --git a/cmd/devnet/devnet/node.go b/cmd/devnet/devnet/node.go index 4c372721a03..30f46633615 100644 --- a/cmd/devnet/devnet/node.go +++ b/cmd/devnet/devnet/node.go @@ -8,6 
+8,9 @@ import ( "sync" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/log/v3" + "github.com/urfave/cli/v2" + "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/args" "github.com/ledgerwatch/erigon/cmd/devnet/requests" @@ -17,8 +20,6 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/debug" enode "github.com/ledgerwatch/erigon/turbo/node" - "github.com/ledgerwatch/log/v3" - "github.com/urfave/cli/v2" ) type Node interface { @@ -171,7 +172,8 @@ func (n *devnetNode) run(ctx *cli.Context) error { } if n.network.BorStateSyncDelay > 0 { - n.ethCfg.Bor.StateSyncConfirmationDelay = map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())} + stateSyncConfirmationDelay := map[string]uint64{"0": uint64(n.network.BorStateSyncDelay.Seconds())} + logger.Warn("TODO: custom BorStateSyncDelay is not applied to BorConfig.StateSyncConfirmationDelay", "delay", stateSyncConfirmationDelay) } n.ethNode, err = enode.New(ctx.Context, n.nodeCfg, n.ethCfg, logger) diff --git a/cmd/devnet/networks/devnet_bor.go b/cmd/devnet/networks/devnet_bor.go index af9b1f218ec..9fccc438dd8 100644 --- a/cmd/devnet/networks/devnet_bor.go +++ b/cmd/devnet/networks/devnet_bor.go @@ -3,6 +3,8 @@ package networks import ( "time" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon/cmd/devnet/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/args" @@ -10,9 +12,9 @@ import ( account_services "github.com/ledgerwatch/erigon/cmd/devnet/services/accounts" "github.com/ledgerwatch/erigon/cmd/devnet/services/polygon" "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/log/v3" ) func NewBorDevnetWithoutHeimdall( @@ -190,8 +192,9 @@ func NewBorDevnetWithLocalHeimdall( logger log.Logger, ) devnet.Devnet { config := *params.BorDevnetChainConfig + borConfig := config.Bor.(*borcfg.BorConfig) if sprintSize > 0 { - config.Bor.Sprint = map[string]uint64{"0": sprintSize} + borConfig.Sprint = map[string]uint64{"0": sprintSize} } checkpointOwner := accounts.NewAccount("checkpoint-owner") diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go index 69da6aae841..b3748cad48f 100644 --- a/cmd/devnet/services/polygon/heimdall.go +++ b/cmd/devnet/services/polygon/heimdall.go @@ -18,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/devnet/blocks" "github.com/ledgerwatch/erigon/cmd/devnet/contracts" "github.com/ledgerwatch/erigon/cmd/devnet/devnet" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" @@ -68,6 +69,7 @@ type CheckpointConfig struct { type Heimdall struct { sync.Mutex chainConfig *chain.Config + borConfig *borcfg.BorConfig grpcAddr string validatorSet *valset.ValidatorSet pendingCheckpoint *checkpoint.Checkpoint @@ -97,6 +99,7 @@ func NewHeimdall( ) *Heimdall { heimdall := &Heimdall{ chainConfig: chainConfig, + borConfig: chainConfig.Bor.(*borcfg.BorConfig), grpcAddr: grpcAddr, checkpointConfig: *checkpointConfig, spans: map[uint64]*span.HeimdallSpan{}, @@ -159,7 +162,7 @@ func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, nextSpan.StartBlock = h.currentSpan.EndBlock + 
1 } - nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.chainConfig.Bor.CalculateSprint(nextSpan.StartBlock)) - 1 + nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.borConfig.CalculateSprintLength(nextSpan.StartBlock)) - 1 // TODO we should use a subset here - see: https://wiki.polygon.technology/docs/pos/bor/ @@ -183,10 +186,10 @@ func (h *Heimdall) Span(ctx context.Context, spanID uint64) (*span.HeimdallSpan, func (h *Heimdall) currentSprintLength() int { if h.currentSpan != nil { - return int(h.chainConfig.Bor.CalculateSprint(h.currentSpan.StartBlock)) + return int(h.borConfig.CalculateSprintLength(h.currentSpan.StartBlock)) } - return int(h.chainConfig.Bor.CalculateSprint(256)) + return int(h.borConfig.CalculateSprintLength(256)) } func (h *Heimdall) getSpanOverrideHeight() uint64 { diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 7de5dff47fc..89db6b2bd93 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -5,15 +5,21 @@ import ( "context" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/common/dbg" - "golang.org/x/sync/semaphore" "strings" "sync" "time" + "github.com/ledgerwatch/erigon-lib/common/dbg" + "golang.org/x/sync/semaphore" + "github.com/c2h5oh/datasize" "github.com/erigontech/mdbx-go/mdbx" lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" + "github.com/spf13/cobra" + "golang.org/x/exp/slices" + "github.com/ledgerwatch/erigon/consensus/bor" "github.com/ledgerwatch/erigon/consensus/bor/heimdall" "github.com/ledgerwatch/erigon/consensus/bor/heimdallgrpc" @@ -22,10 +28,6 @@ import ( "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/secp256k1" - "github.com/spf13/cobra" - "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" chain2 "github.com/ledgerwatch/erigon-lib/chain" @@ -1757,7 +1759,7 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db } else if cc.Aura != nil { consensusConfig = &config.Aura } else if cc.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = cc.Bor config.HeimdallURL = HeimdallURL if !config.WithoutHeimdall { if config.HeimdallgRPCAddress != "" { diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 18c0a614875..c22c7616e5a 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -23,6 +23,7 @@ import ( libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor/contract" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" "github.com/ledgerwatch/erigon/consensus/ethash" @@ -36,6 +37,13 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" + "google.golang.org/grpc" + grpcHealth "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" @@ -46,12 +54,6 @@ import ( kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" 
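With the bor parameters moved into the new `borcfg` package, the touched call sites retrieve the concrete config through a type assertion (`cc.Bor.(*borcfg.BorConfig)`). A defensive comma-ok variant is sketched below purely as an illustration of the pattern; the package name is hypothetical and the code in this change uses the direct assertion instead.

```go
package borutil // hypothetical helper location

import (
	"github.com/ledgerwatch/erigon-lib/chain"
	"github.com/ledgerwatch/erigon/consensus/bor/borcfg"
)

// BorConfigOf extracts the concrete bor config from a chain config,
// returning false when the chain is not running bor or the field holds
// a different implementation.
func BorConfigOf(cc *chain.Config) (*borcfg.BorConfig, bool) {
	if cc == nil || cc.Bor == nil {
		return nil, false
	}
	cfg, ok := cc.Bor.(*borcfg.BorConfig)
	return cfg, ok
}
```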
"github.com/ledgerwatch/erigon-lib/kv/remotedb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" - "github.com/ledgerwatch/log/v3" - "github.com/spf13/cobra" - "golang.org/x/sync/semaphore" - "google.golang.org/grpc" - grpcHealth "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/graphql" @@ -504,9 +506,11 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger } // Skip the compatibility check, until we have a schema in erigon-lib + borConfig := cc.Bor.(*borcfg.BorConfig) + engine = bor.NewRo(cc, borKv, blockReader, span.NewChainSpanner(contract.ValidatorSet(), cc, true, logger), - contract.NewGenesisContractsClient(cc, cc.Bor.ValidatorContract, cc.Bor.StateReceiverContract, logger), logger) + contract.NewGenesisContractsClient(cc, borConfig.ValidatorContract, borConfig.StateReceiverContract, logger), logger) default: engine = ethash.NewFaker() @@ -912,9 +916,11 @@ func (e *remoteConsensusEngine) init(db kv.RoDB, blockReader services.FullBlockR return false } + borConfig := cc.Bor.(*borcfg.BorConfig) + e.engine = bor.NewRo(cc, borKv, blockReader, span.NewChainSpanner(contract.ValidatorSet(), cc, true, logger), - contract.NewGenesisContractsClient(cc, cc.Bor.ValidatorContract, cc.Bor.StateReceiverContract, logger), logger) + contract.NewGenesisContractsClient(cc, borConfig.ValidatorContract, borConfig.StateReceiverContract, logger), logger) } else { e.engine = ethash.NewFaker() } diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index c6a751ac493..4675c941ba4 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -289,7 +289,7 @@ func initConsensusEngine(ctx context.Context, cc *chain2.Config, blockReader ser } else if cc.Aura != nil { consensusConfig = &config.Aura } else if cc.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = cc.Bor } else { consensusConfig = &config.Ethash } diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index ed49c131b6d..aa62d4fffa0 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -28,6 +28,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor/finality" "github.com/ledgerwatch/erigon/consensus/bor/finality/flags" "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" @@ -141,7 +142,7 @@ var ( type SignerFn func(signer libcommon.Address, mimeType string, message []byte) ([]byte, error) // ecrecover extracts the Ethereum account address from a signed header. -func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libcommon.Address], c *chain.BorConfig) (libcommon.Address, error) { +func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libcommon.Address], c *borcfg.BorConfig) (libcommon.Address, error) { // If the signature's already cached, return that hash := header.Hash() if address, known := sigcache.Get(hash); known { @@ -168,7 +169,7 @@ func Ecrecover(header *types.Header, sigcache *lru.ARCCache[libcommon.Hash, libc } // SealHash returns the hash of a block prior to it being sealed. 
-func SealHash(header *types.Header, c *chain.BorConfig) (hash libcommon.Hash) { +func SealHash(header *types.Header, c *borcfg.BorConfig) (hash libcommon.Hash) { hasher := cryptopool.NewLegacyKeccak256() defer cryptopool.ReturnToPoolKeccak256(hasher) @@ -178,7 +179,7 @@ func SealHash(header *types.Header, c *chain.BorConfig) (hash libcommon.Hash) { return hash } -func encodeSigHeader(w io.Writer, header *types.Header, c *chain.BorConfig) { +func encodeSigHeader(w io.Writer, header *types.Header, c *borcfg.BorConfig) { enc := []interface{}{ header.ParentHash, header.UncleHash, @@ -209,11 +210,11 @@ func encodeSigHeader(w io.Writer, header *types.Header, c *chain.BorConfig) { } // CalcProducerDelay is the block delay algorithm based on block time, period, producerDelay and turn-ness of a signer -func CalcProducerDelay(number uint64, succession int, c *chain.BorConfig) uint64 { +func CalcProducerDelay(number uint64, succession int, c *borcfg.BorConfig) uint64 { // When the block is the first block of the sprint, it is expected to be delayed by `producerDelay`. // That is to allow time for block propagation in the last sprint delay := c.CalculatePeriod(number) - if number%c.CalculateSprint(number) == 0 { + if number%c.CalculateSprintLength(number) == 0 { delay = c.CalculateProducerDelay(number) } @@ -231,7 +232,7 @@ func CalcProducerDelay(number uint64, succession int, c *chain.BorConfig) uint64 // Note, the method requires the extra data to be at least 65 bytes, otherwise it // panics. This is done to avoid accidentally using both forms (signature present // or not), which could be abused to produce different hashes for the same header. -func BorRLP(header *types.Header, c *chain.BorConfig) []byte { +func BorRLP(header *types.Header, c *borcfg.BorConfig) []byte { b := new(bytes.Buffer) encodeSigHeader(b, header, c) @@ -240,9 +241,9 @@ func BorRLP(header *types.Header, c *chain.BorConfig) []byte { // Bor is the matic-bor consensus engine type Bor struct { - chainConfig *chain.Config // Chain config - config *chain.BorConfig // Consensus engine configuration parameters for bor consensus - DB kv.RwDB // Database to store and retrieve snapshot checkpoints + chainConfig *chain.Config // Chain config + config *borcfg.BorConfig // Consensus engine configuration parameters for bor consensus + DB kv.RwDB // Database to store and retrieve snapshot checkpoints blockReader services.FullBlockReader Recents *lru.ARCCache[libcommon.Hash, *Snapshot] // Snapshots for recent block to speed up reorgs @@ -273,95 +274,6 @@ type signer struct { signFn SignerFn // Signer function to authorize hashes with } -type sprint struct { - from, size uint64 -} - -type sprints []sprint - -func (s sprints) Len() int { - return len(s) -} - -func (s sprints) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s sprints) Less(i, j int) bool { - return s[i].from < s[j].from -} - -func asSprints(configSprints map[string]uint64) sprints { - sprints := make(sprints, len(configSprints)) - - i := 0 - for key, value := range configSprints { - sprints[i].from, _ = strconv.ParseUint(key, 10, 64) - sprints[i].size = value - i++ - } - - sort.Sort(sprints) - - return sprints -} - -func CalculateSprintCount(config *chain.BorConfig, from, to uint64) int { - - switch { - case from > to: - return 0 - case from < to: - to-- - } - - sprints := asSprints(config.Sprint) - - count := uint64(0) - startCalc := from - - zeroth := func(boundary uint64, size uint64) uint64 { - if boundary%size == 0 { - return 1 - } - - return 0 - } - - for i := 0; i < 
len(sprints)-1; i++ { - if startCalc >= sprints[i].from && startCalc < sprints[i+1].from { - if to >= sprints[i].from && to < sprints[i+1].from { - if startCalc == to { - return int(count + zeroth(startCalc, sprints[i].size)) - } - return int(count + zeroth(startCalc, sprints[i].size) + (to-startCalc)/sprints[i].size) - } else { - endCalc := sprints[i+1].from - 1 - count += zeroth(startCalc, sprints[i].size) + (endCalc-startCalc)/sprints[i].size - startCalc = endCalc + 1 - } - } - } - - if startCalc == to { - return int(count + zeroth(startCalc, sprints[len(sprints)-1].size)) - } - - return int(count + zeroth(startCalc, sprints[len(sprints)-1].size) + (to-startCalc)/sprints[len(sprints)-1].size) -} - -func CalculateSprint(config *chain.BorConfig, number uint64) uint64 { - sprints := asSprints(config.Sprint) - - for i := 0; i < len(sprints)-1; i++ { - if number >= sprints[i].from && number < sprints[i+1].from { - return sprints[i].size - } - } - - return sprints[len(sprints)-1].size -} - // New creates a Matic Bor consensus engine. func New( chainConfig *chain.Config, @@ -373,10 +285,10 @@ func New( logger log.Logger, ) *Bor { // get bor config - borConfig := chainConfig.Bor + borConfig := chainConfig.Bor.(*borcfg.BorConfig) // Set any missing consensus parameters to their defaults - if borConfig != nil && borConfig.CalculateSprint(0) == 0 { + if borConfig != nil && borConfig.CalculateSprintLength(0) == 0 { borConfig.Sprint = defaultSprintLength } @@ -441,10 +353,10 @@ func (w rwWrapper) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { func NewRo(chainConfig *chain.Config, db kv.RoDB, blockReader services.FullBlockReader, spanner Spanner, genesisContracts GenesisContract, logger log.Logger) *Bor { // get bor config - borConfig := chainConfig.Bor + borConfig := chainConfig.Bor.(*borcfg.BorConfig) // Set any missing consensus parameters to their defaults - if borConfig != nil && borConfig.CalculateSprint(0) == 0 { + if borConfig != nil && borConfig.CalculateSprintLength(0) == 0 { borConfig.Sprint = defaultSprintLength } @@ -469,6 +381,10 @@ func (c *Bor) Type() chain.ConsensusName { return chain.BorConsensus } +func (c *Bor) Config() *borcfg.BorConfig { + return c.config +} + type HeaderProgress interface { Progress() uint64 } @@ -533,7 +449,7 @@ func (c *Bor) verifyHeader(chain consensus.ChainHeaderReader, header *types.Head } // check extr adata - isSprintEnd := isSprintStart(number+1, c.config.CalculateSprint(number)) + isSprintEnd := isSprintStart(number+1, c.config.CalculateSprintLength(number)) // Ensure that the extra-data contains a signer list on checkpoint, but none otherwise signersBytes := len(GetValidatorBytes(header, c.config)) @@ -889,7 +805,7 @@ func (c *Bor) verifySeal(chain consensus.ChainHeaderReader, header *types.Header return nil } -func IsBlockOnTime(parent *types.Header, header *types.Header, number uint64, succession int, cfg *chain.BorConfig) bool { +func IsBlockOnTime(parent *types.Header, header *types.Header, number uint64, succession int, cfg *borcfg.BorConfig) bool { return parent != nil && header.Time < parent.Time+CalcProducerDelay(number, succession, cfg) } @@ -922,7 +838,7 @@ func (c *Bor) Prepare(chain consensus.ChainHeaderReader, header *types.Header, s // client calls `GetCurrentValidators` because it makes a contract call // where it fetches producers internally. As we fetch data from span // in Erigon, use directly the `GetCurrentProducers` function. 
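Several of the hunks above only rename `CalculateSprint` to `CalculateSprintLength`; the surrounding logic still treats block numbers that are exact multiples of the sprint length as sprint starts, as the explicit `number%c.CalculateSprint(number) == 0` check in `CalcProducerDelay` shows. A toy illustration of that boundary check, with 16 as an example sprint length and a helper presumably equivalent to the unexported `isSprintStart` used above:

```go
package main

import "fmt"

// isSprintStart: a block starts a sprint when its number is a multiple of
// the sprint length (assumed to match the helper referenced above).
func isSprintStart(number, sprintLength uint64) bool {
	return number%sprintLength == 0
}

func main() {
	const sprintLength = 16 // illustrative value
	for _, n := range []uint64{0, 1, 15, 16, 17, 32} {
		fmt.Printf("block %d sprint start: %v\n", n, isSprintStart(n, sprintLength))
	}
}
```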
- if isSprintStart(number+1, c.config.CalculateSprint(number)) { + if isSprintStart(number+1, c.config.CalculateSprintLength(number)) { spanID := span.IDAt(number + 1) newValidators, err := c.spanner.GetCurrentProducers(spanID, c.authorizedSigner.Load().signer, chain) if err != nil { @@ -1017,7 +933,7 @@ func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state. return nil, nil, consensus.ErrUnexpectedWithdrawals } - if isSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { + if isSprintStart(headerNumber, c.config.CalculateSprintLength(headerNumber)) { cx := statefull.ChainContext{Chain: chain, Bor: c} if c.blockReader != nil { @@ -1083,7 +999,7 @@ func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Heade return nil, nil, nil, consensus.ErrUnexpectedWithdrawals } - if isSprintStart(headerNumber, c.config.CalculateSprint(headerNumber)) { + if isSprintStart(headerNumber, c.config.CalculateSprintLength(headerNumber)) { cx := statefull.ChainContext{Chain: chain, Bor: c} if c.blockReader != nil { @@ -1378,7 +1294,7 @@ func (c *Bor) needToCommitSpan(currentSpan *span.Span, headerNumber uint64) bool if currentSpan.EndBlock == 0 { return true } - sprintLength := c.config.CalculateSprint(headerNumber) + sprintLength := c.config.CalculateSprintLength(headerNumber) // if current block is first block of last sprint in current span if currentSpan.EndBlock > sprintLength && currentSpan.EndBlock-sprintLength+1 == headerNumber { @@ -1542,7 +1458,7 @@ func (c *Bor) getNextHeimdallSpanForTest( spanBor.StartBlock = spanBor.EndBlock + 1 } - spanBor.EndBlock = spanBor.StartBlock + (100 * c.config.CalculateSprint(headerNumber)) - 1 + spanBor.EndBlock = spanBor.StartBlock + (100 * c.config.CalculateSprintLength(headerNumber)) - 1 selectedProducers := make([]valset.Validator, len(snap.ValidatorSet.Validators)) for i, v := range snap.ValidatorSet.Validators { @@ -1632,7 +1548,7 @@ func GetTxDependency(b *types.Block) [][]uint64 { return blockExtraData.TxDependency } -func GetValidatorBytes(h *types.Header, config *chain.BorConfig) []byte { +func GetValidatorBytes(h *types.Header, config *borcfg.BorConfig) []byte { tempExtra := h.Extra if !config.IsParallelUniverse(h.Number.Uint64()) { diff --git a/consensus/bor/bor_test.go b/consensus/bor/bor_test.go index 373b3bd10d5..12e10a7811c 100644 --- a/consensus/bor/bor_test.go +++ b/consensus/bor/bor_test.go @@ -7,6 +7,8 @@ import ( "math/big" "testing" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -34,12 +36,23 @@ import ( type test_heimdall struct { currentSpan *span.HeimdallSpan chainConfig *chain.Config + borConfig *borcfg.BorConfig validatorSet *valset.ValidatorSet spans map[uint64]*span.HeimdallSpan } func newTestHeimdall(chainConfig *chain.Config) *test_heimdall { - return &test_heimdall{nil, chainConfig, nil, map[uint64]*span.HeimdallSpan{}} + return &test_heimdall{ + currentSpan: nil, + chainConfig: chainConfig, + borConfig: chainConfig.Bor.(*borcfg.BorConfig), + validatorSet: nil, + spans: map[uint64]*span.HeimdallSpan{}, + } +} + +func (h *test_heimdall) BorConfig() *borcfg.BorConfig { + return h.borConfig } func (h test_heimdall) StateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) { @@ -67,7 +80,7 @@ func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*span.Heimdall nextSpan.StartBlock = 
h.currentSpan.EndBlock + 1 } - nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.chainConfig.Bor.CalculateSprint(nextSpan.StartBlock)) - 1 + nextSpan.EndBlock = nextSpan.StartBlock + (100 * h.borConfig.CalculateSprintLength(nextSpan.StartBlock)) - 1 // TODO we should use a subset here - see: https://wiki.polygon.technology/docs/pos/bor/ @@ -91,10 +104,10 @@ func (h *test_heimdall) Span(ctx context.Context, spanID uint64) (*span.Heimdall func (h test_heimdall) currentSprintLength() int { if h.currentSpan != nil { - return int(h.chainConfig.Bor.CalculateSprint(h.currentSpan.StartBlock)) + return int(h.borConfig.CalculateSprintLength(h.currentSpan.StartBlock)) } - return int(h.chainConfig.Bor.CalculateSprint(256)) + return int(h.borConfig.CalculateSprintLength(256)) } func (h test_heimdall) FetchCheckpoint(ctx context.Context, number int64) (*checkpoint.Checkpoint, error) { @@ -346,11 +359,11 @@ func TestVerifyRun(t *testing.T) { } func TestVerifySprint(t *testing.T) { - //testVerify(t, 10, 4, int(params.BorDevnetChainConfig.Bor.CalculateSprint(256))) + //testVerify(t, 10, 4, int(params.BorDevnetChainConfig.Bor.CalculateSprintLength(256))) } func TestVerifySpan(t *testing.T) { - //testVerify(t, 10, 4 /*100**/ *int(params.BorDevnetChainConfig.Bor.CalculateSprint(256))) + //testVerify(t, 10, 4 /*100**/ *int(params.BorDevnetChainConfig.Bor.CalculateSprintLength(256))) } func testVerify(t *testing.T, noValidators int, chainLength int) { @@ -392,7 +405,7 @@ func testVerify(t *testing.T, noValidators int, chainLength int) { if isProposer { if vi != lastProposerIndex { - sprintLen := params.BorDevnetChainConfig.Bor.CalculateSprint(block.NumberU64()) + sprintLen := heimdall.BorConfig().CalculateSprintLength(block.NumberU64()) if block.NumberU64() > 1 && block.NumberU64()%sprintLen != 0 { t.Fatalf("Unexpected sprint boundary at %d for: %d", bi, block.NumberU64()) } diff --git a/consensus/bor/borcfg/bor_config.go b/consensus/bor/borcfg/bor_config.go new file mode 100644 index 00000000000..d73f20a830a --- /dev/null +++ b/consensus/bor/borcfg/bor_config.go @@ -0,0 +1,196 @@ +package borcfg + +import ( + "math/big" + "sort" + "strconv" + + "github.com/ledgerwatch/erigon-lib/common" +) + +// BorConfig is the consensus engine configs for Matic bor based sealing. 
+type BorConfig struct { + Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce + ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval + Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer + BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time + ValidatorContract string `json:"validatorContract"` // Validator set contract + StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract + + OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count + BlockAlloc map[string]interface{} `json:"blockAlloc"` + + JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur) + DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi) + IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on indore) + AgraBlock *big.Int `json:"agraBlock"` // Agra switch block (nil = no fork, 0 = already in agra) + StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to` + + ParallelUniverseBlock *big.Int `json:"parallelUniverseBlock"` // TODO: update all occurrence, change name and finalize number (hardfork for block-stm related changes) + + sprints sprints +} + +// String implements the stringer interface, returning the consensus engine details. +func (c *BorConfig) String() string { + return "bor" +} + +func (c *BorConfig) CalculateProducerDelay(number uint64) uint64 { + return borKeyValueConfigHelper(c.ProducerDelay, number) +} + +func (c *BorConfig) CalculateSprintLength(number uint64) uint64 { + if c.sprints == nil { + c.sprints = asSprints(c.Sprint) + } + + for i := 0; i < len(c.sprints)-1; i++ { + if number >= c.sprints[i].from && number < c.sprints[i+1].from { + return c.sprints[i].size + } + } + + return c.sprints[len(c.sprints)-1].size +} + +func (c *BorConfig) CalculateSprintNumber(number uint64) uint64 { + if c.sprints == nil { + c.sprints = asSprints(c.Sprint) + } + + // unknown sprint size + if (len(c.sprints) == 0) || (number < c.sprints[0].from) { + return 0 + } + + // remove sprint configs that are not in effect yet + sprints := c.sprints + for number < sprints[len(sprints)-1].from { + sprints = sprints[:len(sprints)-1] + } + + var count uint64 + end := number + for len(sprints) > 0 { + sprint := sprints[len(sprints)-1] + count += (end - sprint.from) / sprint.size + + sprints = sprints[:len(sprints)-1] + end = sprint.from + } + + if c.sprints[0].from > 0 { + count++ + } + return count +} + +func (c *BorConfig) CalculateBackupMultiplier(number uint64) uint64 { + return borKeyValueConfigHelper(c.BackupMultiplier, number) +} + +func (c *BorConfig) CalculatePeriod(number uint64) uint64 { + return borKeyValueConfigHelper(c.Period, number) +} + +// isForked returns whether a fork scheduled at block s is active at the given head block. 
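`CalculateSprintNumber` counts how many sprint boundaries precede a block by walking the sprint-size schedule from the newest entry backwards. A short usage sketch against the same schedule as the test further down (size 64 from block 0, size 16 from block 256); the expected values are taken from that test table:

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/consensus/bor/borcfg"
)

func main() {
	cfg := borcfg.BorConfig{
		Sprint: map[string]uint64{
			"0":   64, // 64-block sprints from genesis
			"256": 16, // 16-block sprints from block 256
		},
	}
	// Values match the TestCalculateSprintNumber table: block 63 is still in
	// sprint 0, block 64 opens sprint 1, block 255 closes sprint 3,
	// block 256 opens sprint 4, block 320 opens sprint 8.
	for _, n := range []uint64{0, 63, 64, 255, 256, 272, 320} {
		fmt.Printf("block %d -> sprint %d\n", n, cfg.CalculateSprintNumber(n))
	}
}
```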
+func isForked(s *big.Int, head uint64) bool { + if s == nil { + return false + } + return s.Uint64() <= head +} + +func (c *BorConfig) IsJaipur(number uint64) bool { + return isForked(c.JaipurBlock, number) +} + +func (c *BorConfig) IsDelhi(number uint64) bool { + return isForked(c.DelhiBlock, number) +} + +func (c *BorConfig) IsIndore(number uint64) bool { + return isForked(c.IndoreBlock, number) +} + +// IsAgra returns whether num is either equal to the Agra fork block or greater. +// The Agra hard fork is based on the Shanghai hard fork, but it doesn't include withdrawals. +// Also Agra is activated based on the block number rather than the timestamp. +// Refer to https://forum.polygon.technology/t/pip-28-agra-hardfork +func (c *BorConfig) IsAgra(num uint64) bool { + return isForked(c.AgraBlock, num) +} + +func (c *BorConfig) GetAgraBlock() *big.Int { + return c.AgraBlock +} + +// TODO: modify this function once the block number is finalized +func (c *BorConfig) IsParallelUniverse(number uint64) bool { + if c.ParallelUniverseBlock != nil { + if c.ParallelUniverseBlock.Cmp(big.NewInt(0)) == 0 { + return false + } + } + + return isForked(c.ParallelUniverseBlock, number) +} + +func (c *BorConfig) CalculateStateSyncDelay(number uint64) uint64 { + return borKeyValueConfigHelper(c.StateSyncConfirmationDelay, number) +} + +func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, number uint64) T { + fieldUint := make(map[uint64]T) + for k, v := range field { + keyUint, err := strconv.ParseUint(k, 10, 64) + if err != nil { + panic(err) + } + fieldUint[keyUint] = v + } + + keys := common.SortedKeys(fieldUint) + + for i := 0; i < len(keys)-1; i++ { + if number >= keys[i] && number < keys[i+1] { + return fieldUint[keys[i]] + } + } + + return fieldUint[keys[len(keys)-1]] +} + +type sprint struct { + from, size uint64 +} + +type sprints []sprint + +func (s sprints) Len() int { + return len(s) +} + +func (s sprints) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sprints) Less(i, j int) bool { + return s[i].from < s[j].from +} + +func asSprints(configSprints map[string]uint64) sprints { + sprints := make(sprints, len(configSprints)) + + i := 0 + for key, value := range configSprints { + sprints[i].from, _ = strconv.ParseUint(key, 10, 64) + sprints[i].size = value + i++ + } + + sort.Sort(sprints) + + return sprints +} diff --git a/consensus/bor/borcfg/bor_config_test.go b/consensus/bor/borcfg/bor_config_test.go new file mode 100644 index 00000000000..d8467730924 --- /dev/null +++ b/consensus/bor/borcfg/bor_config_test.go @@ -0,0 +1,48 @@ +package borcfg + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCalculateSprintNumber(t *testing.T) { + cfg := BorConfig{ + Sprint: map[string]uint64{ + "0": 64, + "256": 16, + }, + } + + examples := map[uint64]uint64{ + 0: 0, + 1: 0, + 2: 0, + 63: 0, + 64: 1, + 65: 1, + 66: 1, + 127: 1, + 128: 2, + 191: 2, + 192: 3, + 255: 3, + 256: 4, + 257: 4, + 258: 4, + 271: 4, + 272: 5, + 273: 5, + 274: 5, + 287: 5, + 288: 6, + 303: 6, + 304: 7, + 319: 7, + 320: 8, + } + + for blockNumber, expectedSprintNumber := range examples { + assert.Equal(t, expectedSprintNumber, cfg.CalculateSprintNumber(blockNumber), blockNumber) + } +} diff --git a/consensus/bor/heimdall/span/span_id.go b/consensus/bor/heimdall/span/span_id.go index 7c4113bf848..50955212867 100644 --- a/consensus/bor/heimdall/span/span_id.go +++ b/consensus/bor/heimdall/span/span_id.go @@ -1,6 +1,8 @@ package span -import 
"github.com/ledgerwatch/erigon-lib/chain" +import ( + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" +) const ( spanLength = 6400 // Number of blocks in a span @@ -24,10 +26,10 @@ func EndBlockNum(spanID uint64) uint64 { } // BlockInLastSprintOfSpan returns true if a block num is within the last sprint of a span and false otherwise. -func BlockInLastSprintOfSpan(blockNum uint64, config *chain.BorConfig) bool { +func BlockInLastSprintOfSpan(blockNum uint64, config *borcfg.BorConfig) bool { spanNum := IDAt(blockNum) endBlockNum := EndBlockNum(spanNum) - sprintLen := config.CalculateSprint(blockNum) + sprintLen := config.CalculateSprintLength(blockNum) startBlockNum := endBlockNum - sprintLen + 1 return startBlockNum <= blockNum && blockNum <= endBlockNum } diff --git a/consensus/bor/heimdall/span/span_id_test.go b/consensus/bor/heimdall/span/span_id_test.go index 8ab45ed425d..a7c80891c7c 100644 --- a/consensus/bor/heimdall/span/span_id_test.go +++ b/consensus/bor/heimdall/span/span_id_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" ) func TestSpanIDAt(t *testing.T) { @@ -31,7 +31,7 @@ func TestSpanEndBlockNum(t *testing.T) { } func TestBlockInLastSprintOfSpan(t *testing.T) { - config := &chain.BorConfig{ + config := &borcfg.BorConfig{ Sprint: map[string]uint64{ "0": 16, }, diff --git a/consensus/bor/heimdall/span/spanner.go b/consensus/bor/heimdall/span/spanner.go index b3738c4774c..9af95abf718 100644 --- a/consensus/bor/heimdall/span/spanner.go +++ b/consensus/bor/heimdall/span/spanner.go @@ -5,26 +5,31 @@ import ( "encoding/json" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/bor/abi" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/log/v3" ) type ChainSpanner struct { validatorSet abi.ABI chainConfig *chain.Config + borConfig *borcfg.BorConfig logger log.Logger withoutHeimdall bool } func NewChainSpanner(validatorSet abi.ABI, chainConfig *chain.Config, withoutHeimdall bool, logger log.Logger) *ChainSpanner { + borConfig := chainConfig.Bor.(*borcfg.BorConfig) return &ChainSpanner{ validatorSet: validatorSet, chainConfig: chainConfig, + borConfig: borConfig, logger: logger, withoutHeimdall: withoutHeimdall, } @@ -42,7 +47,7 @@ func (c *ChainSpanner) GetCurrentSpan(syscall consensus.SystemCall) (*Span, erro return nil, err } - result, err := syscall(libcommon.HexToAddress(c.chainConfig.Bor.ValidatorContract), data) + result, err := syscall(libcommon.HexToAddress(c.borConfig.ValidatorContract), data) if err != nil { return nil, err } @@ -149,7 +154,7 @@ func (c *ChainSpanner) CommitSpan(heimdallSpan HeimdallSpan, syscall consensus.S return err } - _, err = syscall(libcommon.HexToAddress(c.chainConfig.Bor.ValidatorContract), data) + _, err = syscall(libcommon.HexToAddress(c.borConfig.ValidatorContract), data) return err } diff --git a/consensus/bor/snapshot.go b/consensus/bor/snapshot.go index 836acf36343..8eabd324172 100644 --- a/consensus/bor/snapshot.go +++ b/consensus/bor/snapshot.go @@ -6,17 +6,18 @@ import ( "encoding/json" lru "github.com/hashicorp/golang-lru/arc/v2" - "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/log/v3" + 
"github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor/valset" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/log/v3" ) // Snapshot is the state of the authorization voting at a given point in time. type Snapshot struct { - config *chain.BorConfig // Consensus engine parameters to fine tune behavior + config *borcfg.BorConfig // Consensus engine parameters to fine tune behavior sigcache *lru.ARCCache[common.Hash, common.Address] // Cache of recent block signatures to speed up ecrecover Number uint64 `json:"number"` // Block number where the snapshot was created @@ -38,7 +39,7 @@ const BorSeparate = "BorSeparate" // method does not initialize the set of recent signers, so only ever use if for // the genesis block. func NewSnapshot( - config *chain.BorConfig, + config *borcfg.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], number uint64, hash common.Hash, @@ -57,7 +58,7 @@ func NewSnapshot( } // loadSnapshot loads an existing snapshot from the database. -func LoadSnapshot(config *chain.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], db kv.RwDB, hash common.Hash) (*Snapshot, error) { +func LoadSnapshot(config *borcfg.BorConfig, sigcache *lru.ARCCache[common.Hash, common.Address], db kv.RwDB, hash common.Hash) (*Snapshot, error) { tx, err := db.BeginRo(context.Background()) if err != nil { return nil, err @@ -139,7 +140,7 @@ func (s *Snapshot) Apply(parent *types.Header, headers []*types.Header, logger l for _, header := range headers { // Remove any votes on checkpoint blocks number := header.Number.Uint64() - sprintLen := s.config.CalculateSprint(number) + sprintLen := s.config.CalculateSprintLength(number) // Delete the oldest signer from the recent list to allow it signing again if number >= sprintLen { @@ -244,18 +245,9 @@ func (s *Snapshot) Difficulty(signer common.Address) uint64 { return 1 } - validators := s.ValidatorSet.Validators - proposer := s.ValidatorSet.GetProposer().Address - totalValidators := len(validators) - - proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer) - signerIndex, _ := s.ValidatorSet.GetByAddress(signer) - - // temp index - tempIndex := signerIndex - if tempIndex < proposerIndex { - tempIndex = tempIndex + totalValidators + if d, err := s.ValidatorSet.Difficulty(signer); err == nil { + return d + } else { + return 0 } - - return uint64(totalValidators - (tempIndex - proposerIndex)) } diff --git a/consensus/bor/valset/validator_set.go b/consensus/bor/valset/validator_set.go index de2792d5285..4f540dc8e1e 100644 --- a/consensus/bor/valset/validator_set.go +++ b/consensus/bor/valset/validator_set.go @@ -4,14 +4,16 @@ package valset import ( "bytes" + "errors" "fmt" "math" "math/big" "sort" "strings" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/log/v3" + + libcommon "github.com/ledgerwatch/erigon-lib/common" ) // MaxTotalVotingPower - the maximum allowed total voting power. 
@@ -656,6 +658,30 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator, logger log.L return vals.updateWithChangeSet(changes, true, logger) } +// Difficulty returns the difficulty for a particular signer at the current snapshot number +func (vals *ValidatorSet) Difficulty(signer libcommon.Address) (uint64, error) { + proposer := vals.GetProposer() + if proposer == nil { + return 0, errors.New("ValidatorSet.Difficulty: proposer not found") + } + + proposerIndex, _ := vals.GetByAddress(proposer.Address) + if proposerIndex < 0 { + return 0, errors.New("ValidatorSet.Difficulty: proposer index not found") + } + + signerIndex, _ := vals.GetByAddress(signer) + if signerIndex < 0 { + return 0, errors.New("ValidatorSet.Difficulty: signer index not found") + } + + indexDiff := signerIndex - proposerIndex + if indexDiff < 0 { + indexDiff += len(vals.Validators) + } + return uint64(len(vals.Validators) - indexDiff), nil +} + //----------------- // ErrTooMuchChange diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index 0f487177bfc..de00f536d79 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -23,6 +23,8 @@ import ( "sort" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" @@ -35,7 +37,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages/mock" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/turbo/testlog" ) // testerAccountPool is a pool to maintain currently active tester accounts, @@ -392,6 +394,7 @@ func TestClique(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) // Create the account pool and generate the initial set of signers accounts := newTesterAccountPool() @@ -509,7 +512,13 @@ func TestClique(t *testing.T) { var snap *clique.Snapshot if err := m.DB.View(context.Background(), func(tx kv.Tx) error { - snap, err = engine.Snapshot(stagedsync.ChainReader{Cfg: config, Db: tx, BlockReader: m.BlockReader}, head.NumberU64(), head.Hash(), nil) + chainReader := stagedsync.ChainReader{ + Cfg: config, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + snap, err = engine.Snapshot(chainReader, head.NumberU64(), head.Hash(), nil) if err != nil { return err } diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559.go index 8cd278b253b..e7729a4bef9 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559.go @@ -22,6 +22,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/types" @@ -96,9 +97,9 @@ func CalcBaseFee(config *chain.Config, parent *types.Header) *big.Int { } } -func getBaseFeeChangeDenominator(borConfig *chain.BorConfig, number uint64) uint64 { +func getBaseFeeChangeDenominator(borConfig chain.BorConfig, number uint64) uint64 { // If we're running bor based chain post delhi hardfork, return the new value - if borConfig != nil && borConfig.IsDelhi(number) { + if borConfig, ok := borConfig.(*borcfg.BorConfig); ok && borConfig.IsDelhi(number) { return params.BaseFeeChangeDenominatorPostDelhi } diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 34ab7a2c21d..e57d40d3477 100644 
--- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -20,13 +20,17 @@ import ( "context" "testing" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/stages/mock" + "github.com/ledgerwatch/erigon/turbo/testlog" ) // Tests that simple header verification works, for both good and bad blocks. @@ -37,6 +41,7 @@ func TestHeaderVerification(t *testing.T) { gspec = &types.Genesis{Config: params.TestChainConfig} engine = ethash.NewFaker() ) + logger := testlog.Logger(t, log.LvlInfo) checkStateRoot := true m := mock.MockWithGenesisEngine(t, gspec, engine, false, checkStateRoot) @@ -48,13 +53,19 @@ func TestHeaderVerification(t *testing.T) { for i := 0; i < chain.Length(); i++ { if err := m.DB.View(context.Background(), func(tx kv.Tx) error { for j, valid := range []bool{true, false} { + chainReader := stagedsync.ChainReader{ + Cfg: *params.TestChainConfig, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + var engine consensus.Engine if valid { - engine := ethash.NewFaker() - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFaker() } else { - engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) } + err = engine.VerifyHeader(chainReader, chain.Headers[i], true) if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) } @@ -79,6 +90,7 @@ func TestHeaderWithSealVerification(t *testing.T) { gspec = &types.Genesis{Config: params.TestChainAuraConfig} engine = ethash.NewFaker() ) + logger := testlog.Logger(t, log.LvlInfo) checkStateRoot := true m := mock.MockWithGenesisEngine(t, gspec, engine, false, checkStateRoot) @@ -91,13 +103,19 @@ func TestHeaderWithSealVerification(t *testing.T) { for i := 0; i < chain.Length(); i++ { if err := m.DB.View(context.Background(), func(tx kv.Tx) error { for j, valid := range []bool{true, false} { + chainReader := stagedsync.ChainReader{ + Cfg: *params.TestChainAuraConfig, + Db: tx, + BlockReader: m.BlockReader, + Logger: logger, + } + var engine consensus.Engine if valid { - engine := ethash.NewFaker() - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainAuraConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFaker() } else { - engine := ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) - err = engine.VerifyHeader(stagedsync.ChainReader{Cfg: *params.TestChainAuraConfig, Db: tx, BlockReader: m.BlockReader}, chain.Headers[i], true) + engine = ethash.NewFakeFailer(chain.Headers[i].Number.Uint64()) } + err = engine.VerifyHeader(chainReader, chain.Headers[i], true) if (err == nil) != valid { t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, err, valid) } diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 433c9221b18..f5c51cb3c1e 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -244,8 +244,8 @@ func GatherForks(config *chain.Config, genesisTime 
uint64) (heightForks []uint64 heightForks = append(heightForks, *config.Aura.PosdaoTransition) } - if config.Bor != nil && config.Bor.AgraBlock != nil { - heightForks = append(heightForks, config.Bor.AgraBlock.Uint64()) + if config.Bor != nil && config.Bor.GetAgraBlock() != nil { + heightForks = append(heightForks, config.Bor.GetAgraBlock().Uint64()) } // Sort the fork block numbers & times to permit chronological XOR diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index d93691c723d..c27da075cb1 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -20,6 +20,8 @@ import ( "encoding/json" "fmt" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -34,10 +36,20 @@ func ReadChainConfig(db kv.Getter, hash libcommon.Hash) (*chain.Config, error) { if len(data) == 0 { return nil, nil } + var config chain.Config if err := json.Unmarshal(data, &config); err != nil { return nil, fmt.Errorf("invalid chain config JSON: %x, %w", hash, err) } + + if config.BorJSON != nil { + borConfig := &borcfg.BorConfig{} + if err := json.Unmarshal(config.BorJSON, borConfig); err != nil { + return nil, fmt.Errorf("invalid chain config 'bor' JSON: %x, %w", hash, err) + } + config.Bor = borConfig + } + return &config, nil } @@ -46,10 +58,20 @@ func WriteChainConfig(db kv.Putter, hash libcommon.Hash, cfg *chain.Config) erro if cfg == nil { return nil } + + if cfg.Bor != nil { + borJSON, err := json.Marshal(cfg.Bor) + if err != nil { + return fmt.Errorf("failed to JSON encode chain config 'bor': %w", err) + } + cfg.BorJSON = borJSON + } + data, err := json.Marshal(cfg) if err != nil { return fmt.Errorf("failed to JSON encode chain config: %w", err) } + if err := db.Put(kv.ConfigTable, hash[:], data); err != nil { return fmt.Errorf("failed to store chain config: %w", err) } diff --git a/core/system_contract_lookup.go b/core/system_contract_lookup.go index 2905904a2d3..6b6908dda69 100644 --- a/core/system_contract_lookup.go +++ b/core/system_contract_lookup.go @@ -7,6 +7,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain/networkname" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" @@ -23,7 +24,8 @@ func init() { allocToCodeRecords(genesisBlock.Alloc, byChain, 0) // Process upgrades chainConfig := params.ChainConfigByChainName(chainName) - for blockNumStr, genesisAlloc := range chainConfig.Bor.BlockAlloc { + borConfig := chainConfig.Bor.(*borcfg.BorConfig) + for blockNumStr, genesisAlloc := range borConfig.BlockAlloc { blockNum, err := strconv.ParseUint(blockNumStr, 10, 64) if err != nil { panic(fmt.Errorf("failed to parse block number in BlockAlloc: %s", err.Error())) diff --git a/erigon-lib/chain/chain_config.go b/erigon-lib/chain/chain_config.go index cecba46e21c..cc8fe6d9468 100644 --- a/erigon-lib/chain/chain_config.go +++ b/erigon-lib/chain/chain_config.go @@ -17,9 +17,9 @@ package chain import ( + "encoding/json" "fmt" "math/big" - "sort" "strconv" "github.com/ledgerwatch/erigon-lib/common" @@ -81,7 +81,15 @@ type Config struct { Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` Aura *AuRaConfig `json:"aura,omitempty"` - Bor *BorConfig `json:"bor,omitempty"` + 
+ Bor BorConfig `json:"-"` + BorJSON json.RawMessage `json:"bor,omitempty"` +} + +type BorConfig interface { + fmt.Stringer + IsAgra(num uint64) bool + GetAgraBlock() *big.Int } func (c *Config) String() string { @@ -203,10 +211,7 @@ func (c *Config) IsShanghai(time uint64) bool { // Also Agra is activated based on the block number rather than the timestamp. // Refer to https://forum.polygon.technology/t/pip-28-agra-hardfork func (c *Config) IsAgra(num uint64) bool { - if c == nil || c.Bor == nil { - return false - } - return isForked(c.Bor.AgraBlock, num) + return (c != nil) && (c.Bor != nil) && c.Bor.IsAgra(num) } // IsCancun returns whether time is either equal to the Cancun fork time or greater. @@ -452,132 +457,6 @@ func (c *CliqueConfig) String() string { return "clique" } -// BorConfig is the consensus engine configs for Matic bor based sealing. -type BorConfig struct { - Period map[string]uint64 `json:"period"` // Number of seconds between blocks to enforce - ProducerDelay map[string]uint64 `json:"producerDelay"` // Number of seconds delay between two producer interval - Sprint map[string]uint64 `json:"sprint"` // Epoch length to proposer - BackupMultiplier map[string]uint64 `json:"backupMultiplier"` // Backup multiplier to determine the wiggle time - ValidatorContract string `json:"validatorContract"` // Validator set contract - StateReceiverContract string `json:"stateReceiverContract"` // State receiver contract - - OverrideStateSyncRecords map[string]int `json:"overrideStateSyncRecords"` // override state records count - BlockAlloc map[string]interface{} `json:"blockAlloc"` - - JaipurBlock *big.Int `json:"jaipurBlock"` // Jaipur switch block (nil = no fork, 0 = already on jaipur) - DelhiBlock *big.Int `json:"delhiBlock"` // Delhi switch block (nil = no fork, 0 = already on delhi) - IndoreBlock *big.Int `json:"indoreBlock"` // Indore switch block (nil = no fork, 0 = already on indore) - AgraBlock *big.Int `json:"agraBlock"` // Agra switch block (nil = no fork, 0 = already in agra) - StateSyncConfirmationDelay map[string]uint64 `json:"stateSyncConfirmationDelay"` // StateSync Confirmation Delay, in seconds, to calculate `to` - - ParallelUniverseBlock *big.Int `json:"parallelUniverseBlock"` // TODO: update all occurrence, change name and finalize number (hardfork for block-stm related changes) - - sprints sprints -} - -// String implements the stringer interface, returning the consensus engine details. 
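// Aside (sketch with made-up values, not part of this diff): the two-step decode that the
// Bor/BorJSON split above enables, mirroring rawdb.ReadChainConfig earlier in this diff and
// params.readChainSpec further down. The "bor" section is first captured as raw JSON, then
// bound to *borcfg.BorConfig and stored behind the new chain.BorConfig interface.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ledgerwatch/erigon-lib/chain"
	"github.com/ledgerwatch/erigon/consensus/bor/borcfg"
)

func main() {
	raw := []byte(`{"chainId":137,"bor":{"sprint":{"0":64,"256":16}}}`)

	var cfg chain.Config
	if err := json.Unmarshal(raw, &cfg); err != nil { // "bor" lands in cfg.BorJSON
		panic(err)
	}
	if cfg.BorJSON != nil {
		borConfig := &borcfg.BorConfig{}
		if err := json.Unmarshal(cfg.BorJSON, borConfig); err != nil {
			panic(err)
		}
		cfg.Bor = borConfig // satisfies the chain.BorConfig interface
	}
	fmt.Println(cfg.Bor.(*borcfg.BorConfig).CalculateSprintLength(300)) // 16
}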
-func (c *BorConfig) String() string { - return "bor" -} - -func (c *BorConfig) CalculateProducerDelay(number uint64) uint64 { - return borKeyValueConfigHelper(c.ProducerDelay, number) -} - -func (c *BorConfig) CalculateSprint(number uint64) uint64 { - if c.sprints == nil { - c.sprints = asSprints(c.Sprint) - } - - for i := 0; i < len(c.sprints)-1; i++ { - if number >= c.sprints[i].from && number < c.sprints[i+1].from { - return c.sprints[i].size - } - } - - return c.sprints[len(c.sprints)-1].size -} - -func (c *BorConfig) CalculateSprintCount(from, to uint64) int { - switch { - case from > to: - return 0 - case from < to: - to-- - } - - if c.sprints == nil { - c.sprints = asSprints(c.Sprint) - } - - count := uint64(0) - startCalc := from - - zeroth := func(boundary uint64, size uint64) uint64 { - if boundary%size == 0 { - return 1 - } - - return 0 - } - - for i := 0; i < len(c.sprints)-1; i++ { - if startCalc >= c.sprints[i].from && startCalc < c.sprints[i+1].from { - if to >= c.sprints[i].from && to < c.sprints[i+1].from { - if startCalc == to { - return int(count + zeroth(startCalc, c.sprints[i].size)) - } - return int(count + zeroth(startCalc, c.sprints[i].size) + (to-startCalc)/c.sprints[i].size) - } else { - endCalc := c.sprints[i+1].from - 1 - count += zeroth(startCalc, c.sprints[i].size) + (endCalc-startCalc)/c.sprints[i].size - startCalc = endCalc + 1 - } - } - } - - if startCalc == to { - return int(count + zeroth(startCalc, c.sprints[len(c.sprints)-1].size)) - } - - return int(count + zeroth(startCalc, c.sprints[len(c.sprints)-1].size) + (to-startCalc)/c.sprints[len(c.sprints)-1].size) -} - -func (c *BorConfig) CalculateBackupMultiplier(number uint64) uint64 { - return borKeyValueConfigHelper(c.BackupMultiplier, number) -} - -func (c *BorConfig) CalculatePeriod(number uint64) uint64 { - return borKeyValueConfigHelper(c.Period, number) -} - -func (c *BorConfig) IsJaipur(number uint64) bool { - return isForked(c.JaipurBlock, number) -} - -func (c *BorConfig) IsDelhi(number uint64) bool { - return isForked(c.DelhiBlock, number) -} - -func (c *BorConfig) IsIndore(number uint64) bool { - return isForked(c.IndoreBlock, number) -} - -// TODO: modify this function once the block number is finalized -func (c *BorConfig) IsParallelUniverse(number uint64) bool { - if c.ParallelUniverseBlock != nil { - if c.ParallelUniverseBlock.Cmp(big.NewInt(0)) == 0 { - return false - } - } - - return isForked(c.ParallelUniverseBlock, number) -} - -func (c *BorConfig) CalculateStateSyncDelay(number uint64) uint64 { - return borKeyValueConfigHelper(c.StateSyncConfirmationDelay, number) -} - func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, number uint64) T { fieldUint := make(map[uint64]T) for k, v := range field { @@ -599,39 +478,6 @@ func borKeyValueConfigHelper[T uint64 | common.Address](field map[string]T, numb return fieldUint[keys[len(keys)-1]] } -type sprint struct { - from, size uint64 -} - -type sprints []sprint - -func (s sprints) Len() int { - return len(s) -} - -func (s sprints) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s sprints) Less(i, j int) bool { - return s[i].from < s[j].from -} - -func asSprints(configSprints map[string]uint64) sprints { - sprints := make(sprints, len(configSprints)) - - i := 0 - for key, value := range configSprints { - sprints[i].from, _ = strconv.ParseUint(key, 10, 64) - sprints[i].size = value - i++ - } - - sort.Sort(sprints) - - return sprints -} - // Rules is syntactic sugar over Config. 
It can be used for functions // that do not have or require information about the block. // diff --git a/erigon-lib/common/collections.go b/erigon-lib/common/collections.go new file mode 100644 index 00000000000..1e5a13856d2 --- /dev/null +++ b/erigon-lib/common/collections.go @@ -0,0 +1,15 @@ +package common + +func SliceReverse[T any](s []T) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +func SliceMap[T any, U any](s []T, mapFunc func(T) U) []U { + out := make([]U, 0, len(s)) + for _, x := range s { + out = append(out, mapFunc(x)) + } + return out +} diff --git a/erigon-lib/txpool/txpooluitl/all_components.go b/erigon-lib/txpool/txpooluitl/all_components.go index ffa6fdf0310..156a5771c2b 100644 --- a/erigon-lib/txpool/txpooluitl/all_components.go +++ b/erigon-lib/txpool/txpooluitl/all_components.go @@ -137,7 +137,7 @@ func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cach shanghaiTime := chainConfig.ShanghaiTime var agraBlock *big.Int if chainConfig.Bor != nil { - agraBlock = chainConfig.Bor.AgraBlock + agraBlock = chainConfig.Bor.GetAgraBlock() } cancunTime := chainConfig.CancunTime if cfg.OverrideCancunTime != nil { diff --git a/eth/backend.go b/eth/backend.go index 497658879b0..2d313a85cf1 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -514,7 +514,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } else if chainConfig.Aura != nil { consensusConfig = &config.Aura } else if chainConfig.Bor != nil { - consensusConfig = &config.Bor + consensusConfig = chainConfig.Bor } else { consensusConfig = &config.Ethash } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index d0f79d63a40..0034d288d47 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -34,7 +34,6 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" - "github.com/ledgerwatch/erigon/cl/beacon/beacon_router_configuration" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/consensus/ethash/ethashcfg" @@ -211,7 +210,6 @@ type Config struct { Clique params.ConsensusSnapshotConfig Aura chain.AuRaConfig - Bor chain.BorConfig // Transaction pool options DeprecatedTxPool DeprecatedTxPoolConfig diff --git a/eth/ethconsensusconfig/config.go b/eth/ethconsensusconfig/config.go index 72ff681393e..04bf7cc08c1 100644 --- a/eth/ethconsensusconfig/config.go +++ b/eth/ethconsensusconfig/config.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" @@ -95,12 +96,12 @@ func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chai panic(err) } } - case *chain.BorConfig: + case *borcfg.BorConfig: // If Matic bor consensus is requested, set it up // In order to pass the ethereum transaction tests, we need to set the burn contract which is in the bor config // Then, bor != nil will also be enabled for ethash and clique. Only enable Bor for real if there is a validator contract present. 
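// Aside (hypothetical helper, not in this PR): the consumer-side pattern used by
// CreateConsensusEngine in this hunk and by StageBorHeimdallCfg later in the diff.
// chain.Config.Bor is now an interface, so bor-specific code type-asserts it to
// *borcfg.BorConfig before touching fields like ValidatorContract.
package borexample // hypothetical example package

import (
	"github.com/ledgerwatch/erigon-lib/chain"
	"github.com/ledgerwatch/erigon/consensus/bor/borcfg"
)

// borConfigOf returns the concrete bor config, or (nil, false) for non-bor chains.
func borConfigOf(chainConfig *chain.Config) (*borcfg.BorConfig, bool) {
	if chainConfig == nil || chainConfig.Bor == nil {
		return nil, false
	}
	borConfig, ok := chainConfig.Bor.(*borcfg.BorConfig)
	return borConfig, ok
}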
- if chainConfig.Bor != nil && chainConfig.Bor.ValidatorContract != "" { - genesisContractsClient := contract.NewGenesisContractsClient(chainConfig, chainConfig.Bor.ValidatorContract, chainConfig.Bor.StateReceiverContract, logger) + if chainConfig.Bor != nil && consensusCfg.ValidatorContract != "" { + genesisContractsClient := contract.NewGenesisContractsClient(chainConfig, consensusCfg.ValidatorContract, consensusCfg.StateReceiverContract, logger) spanner := span.NewChainSpanner(contract.ValidatorSet(), chainConfig, withoutHeimdall, logger) diff --git a/eth/stagedsync/chain_reader.go b/eth/stagedsync/chain_reader.go index 862cae5710a..f1d0e52057e 100644 --- a/eth/stagedsync/chain_reader.go +++ b/eth/stagedsync/chain_reader.go @@ -4,23 +4,24 @@ import ( "context" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/services" ) -// Implements consensus.ChainReader +// ChainReader implements consensus.ChainReader type ChainReader struct { Cfg chain.Config Db kv.Getter BlockReader services.FullBlockReader + Logger log.Logger } // Config retrieves the blockchain's chain configuration. @@ -81,10 +82,16 @@ func (cr ChainReader) FrozenBlocks() uint64 { return cr.BlockReader.FrozenBlocks() } -func (cr ChainReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue { - panic("") +func (cr ChainReader) BorEventsByBlock(_ libcommon.Hash, _ uint64) []rlp.RawValue { + panic("bor events by block not implemented") } func (cr ChainReader) BorSpan(spanId uint64) []byte { - panic("") + span, err := cr.BlockReader.Span(context.Background(), cr.Db, spanId) + if err != nil { + cr.Logger.Error("BorSpan failed", "err", err) + return nil + } + + return span } diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 852eca9f016..e8fb3acb17a 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -6,14 +6,14 @@ import ( "runtime" "time" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/dataflow" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/turbo/adapter" @@ -134,7 +134,7 @@ func BodiesForward( prevProgress := bodyProgress var noProgressCount uint = 0 // How many time the progress was printed without actual progress var totalDelivered uint64 = 0 - cr := ChainReader{Cfg: cfg.chanConfig, Db: tx, BlockReader: cfg.blockReader} + cr := ChainReader{Cfg: cfg.chanConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} loopBody := func() (bool, error) { // loopCount is used here to ensure we don't get caught in a constant loop of making requests diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index af23c822b31..e4c6a41200b 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -23,6 +23,7 @@ import ( 
"github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor/contract" "github.com/ledgerwatch/erigon/consensus/bor/finality/generics" "github.com/ledgerwatch/erigon/consensus/bor/finality/whitelist" @@ -40,7 +41,7 @@ import ( const ( inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory - inmemorySignatures = 4096 // Number of recent block signatures to keep in memory + InMemorySignatures = 4096 // Number of recent block signatures to keep in memory snapshotPersistInterval = 1024 // Number of blocks after which to persist the vote snapshot to the database extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal @@ -56,6 +57,7 @@ type BorHeimdallCfg struct { snapDb kv.RwDB // Database to store and retrieve snapshot checkpoints miningState MiningState chainConfig chain.Config + borConfig *borcfg.BorConfig heimdallClient heimdall.IHeimdallClient blockReader services.FullBlockReader hd *headerdownload.HeaderDownload @@ -79,11 +81,17 @@ func StageBorHeimdallCfg( recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], ) BorHeimdallCfg { + var borConfig *borcfg.BorConfig + if chainConfig.Bor != nil { + borConfig = chainConfig.Bor.(*borcfg.BorConfig) + } + return BorHeimdallCfg{ db: db, snapDb: snapDb, miningState: miningState, chainConfig: chainConfig, + borConfig: borConfig, heimdallClient: heimdallClient, blockReader: blockReader, hd: hd, @@ -106,7 +114,7 @@ func BorHeimdallForward( ) (err error) { processStart := time.Now() - if cfg.chainConfig.Bor == nil { + if cfg.borConfig == nil { return } if cfg.heimdallClient == nil { @@ -230,7 +238,7 @@ func BorHeimdallForward( endSpanID = span.IDAt(headNumber + 1) } - if span.BlockInLastSprintOfSpan(headNumber, cfg.chainConfig.Bor) { + if span.BlockInLastSprintOfSpan(headNumber, cfg.borConfig) { endSpanID++ } @@ -242,7 +250,7 @@ func BorHeimdallForward( if err != nil { return err } - signatures, err := lru.NewARC[libcommon.Hash, libcommon.Address](inmemorySignatures) + signatures, err := lru.NewARC[libcommon.Hash, libcommon.Address](InMemorySignatures) if err != nil { return err } @@ -297,19 +305,19 @@ func BorHeimdallForward( } } - sprintLength := cfg.chainConfig.Bor.CalculateSprint(blockNum) + sprintLength := cfg.borConfig.CalculateSprintLength(blockNum) spanID := span.IDAt(blockNum) if (spanID > 0) && ((blockNum+1)%sprintLength == 0) { - if err = checkHeaderExtraData(u, ctx, chain, blockNum, header, cfg.chainConfig.Bor); err != nil { + if err = checkHeaderExtraData(u, ctx, chain, blockNum, header, cfg.borConfig); err != nil { return err } } } - if blockNum > 0 && blockNum%cfg.chainConfig.Bor.CalculateSprint(blockNum) == 0 { + if blockNum > 0 && blockNum%cfg.borConfig.CalculateSprintLength(blockNum) == 0 { var callTime time.Duration var records int - if lastEventId, records, callTime, err = fetchAndWriteBorEvents(ctx, cfg.blockReader, cfg.chainConfig.Bor, header, lastEventId, cfg.chainConfig.ChainID.String(), tx, cfg.heimdallClient, cfg.stateReceiverABI, s.LogPrefix(), logger); err != nil { + if lastEventId, records, callTime, err = fetchAndWriteBorEvents(ctx, cfg.blockReader, cfg.borConfig, header, lastEventId, cfg.chainConfig.ChainID.String(), tx, cfg.heimdallClient, 
cfg.stateReceiverABI, s.LogPrefix(), logger); err != nil { return err } @@ -321,10 +329,10 @@ func BorHeimdallForward( if header != nil { if cfg.blockReader.BorSnapshots().SegmentsMin() == 0 { - snap = loadSnapshot(blockNum, header.Hash(), cfg.chainConfig.Bor, recents, signatures, cfg.snapDb, logger) + snap = loadSnapshot(blockNum, header.Hash(), cfg.borConfig, recents, signatures, cfg.snapDb, logger) if snap == nil { - snap, err = initValidatorSets(ctx, tx, cfg.blockReader, cfg.chainConfig.Bor, + snap, err = initValidatorSets(ctx, tx, cfg.blockReader, cfg.borConfig, cfg.heimdallClient, chain, blockNum, recents, signatures, cfg.snapDb, logger, s.LogPrefix()) if err != nil { @@ -332,7 +340,7 @@ func BorHeimdallForward( } } - if err = persistValidatorSets(ctx, snap, u, tx, cfg.blockReader, cfg.chainConfig.Bor, chain, blockNum, header.Hash(), recents, signatures, cfg.snapDb, logger, s.LogPrefix()); err != nil { + if err = persistValidatorSets(ctx, snap, u, tx, cfg.blockReader, cfg.borConfig, chain, blockNum, header.Hash(), recents, signatures, cfg.snapDb, logger, s.LogPrefix()); err != nil { return fmt.Errorf("can't persist validator sets: %w", err) } } @@ -365,7 +373,7 @@ func checkHeaderExtraData( chain consensus.ChainHeaderReader, blockNum uint64, header *types.Header, - config *chain.BorConfig, + config *borcfg.BorConfig, ) error { spanID := span.IDAt(blockNum + 1) spanBytes := chain.BorSpan(spanID) @@ -400,7 +408,7 @@ func checkHeaderExtraData( func fetchAndWriteBorEvents( ctx context.Context, blockReader services.FullBlockReader, - config *chain.BorConfig, + config *borcfg.BorConfig, header *types.Header, lastEventId uint64, chainID string, @@ -428,7 +436,7 @@ func fetchAndWriteBorEvents( stateSyncDelay := config.CalculateStateSyncDelay(blockNum) to = time.Unix(int64(header.Time-stateSyncDelay), 0) } else { - pHeader, err := blockReader.HeaderByNumber(ctx, tx, blockNum-config.CalculateSprint(blockNum)) + pHeader, err := blockReader.HeaderByNumber(ctx, tx, blockNum-config.CalculateSprintLength(blockNum)) if err != nil { return lastEventId, 0, time.Since(fetchStart), err } @@ -529,7 +537,7 @@ func fetchAndWriteSpans( return spanId, nil } -func loadSnapshot(blockNum uint64, hash libcommon.Hash, config *chain.BorConfig, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], +func loadSnapshot(blockNum uint64, hash libcommon.Hash, config *borcfg.BorConfig, recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot], signatures *lru.ARCCache[libcommon.Hash, libcommon.Address], snapDb kv.RwDB, logger log.Logger) *bor.Snapshot { @@ -554,7 +562,7 @@ func persistValidatorSets( u Unwinder, tx kv.Tx, blockReader services.FullBlockReader, - config *chain.BorConfig, + config *borcfg.BorConfig, chain consensus.ChainHeaderReader, blockNum uint64, hash libcommon.Hash, @@ -673,7 +681,7 @@ func initValidatorSets( ctx context.Context, tx kv.RwTx, blockReader services.FullBlockReader, - config *chain.BorConfig, + config *borcfg.BorConfig, heimdallClient heimdall.IHeimdallClient, chain consensus.ChainHeaderReader, blockNum uint64, @@ -732,7 +740,7 @@ func initValidatorSets( g.SetLimit(estimate.AlmostAllCPUs()) defer g.Wait() - batchSize := 128 // must be < inmemorySignatures + batchSize := 128 // must be < InMemorySignatures initialHeaders := make([]*types.Header, 0, batchSize) parentHeader := zeroHeader for i := uint64(1); i <= blockNum; i++ { @@ -740,7 +748,7 @@ func initValidatorSets( { // `snap.apply` bottleneck - is recover of signer. 
// to speedup: recover signer in background goroutines and save in `sigcache` - // `batchSize` < `inmemorySignatures`: means all current batch will fit in cache - and `snap.apply` will find it there. + // `batchSize` < `InMemorySignatures`: means all current batch will fit in cache - and `snap.apply` will find it there. g.Go(func() error { if header == nil { return nil @@ -775,7 +783,7 @@ func initValidatorSets( } func BorHeimdallUnwind(u *UnwindState, ctx context.Context, s *StageState, tx kv.RwTx, cfg BorHeimdallCfg) (err error) { - if cfg.chainConfig.Bor == nil { + if cfg.borConfig == nil { return } useExternalTx := tx != nil @@ -848,7 +856,7 @@ func BorHeimdallUnwind(u *UnwindState, ctx context.Context, s *StageState, tx kv } func BorHeimdallPrune(s *PruneState, ctx context.Context, tx kv.RwTx, cfg BorHeimdallCfg) (err error) { - if cfg.chainConfig.Bor == nil { + if cfg.borConfig == nil { return } return diff --git a/eth/stagedsync/stage_bor_heimdall_test.go b/eth/stagedsync/stage_bor_heimdall_test.go index 3bbb5405c8e..6b2294e7c4b 100644 --- a/eth/stagedsync/stage_bor_heimdall_test.go +++ b/eth/stagedsync/stage_bor_heimdall_test.go @@ -189,7 +189,7 @@ func TestBorHeimdallForwardDetectsUnauthorizedSignerError(t *testing.T) { invalidHeader.Extra = bytes.Repeat([]byte{0x00}, types.ExtraVanityLength+types.ExtraSealLength) validatorKey1, err := crypto.GenerateKey() require.NoError(t, err) - sighash, err := crypto.Sign(crypto.Keccak256(bor.BorRLP(invalidHeader, chainConfig.Bor)), validatorKey1) + sighash, err := crypto.Sign(crypto.Keccak256(bor.BorRLP(invalidHeader, testHarness.BorConfig())), validatorKey1) require.NoError(t, err) copy(invalidHeader.Extra[len(invalidHeader.Extra)-types.ExtraSealLength:], sighash) testHarness.SaveHeader(ctx, t, invalidHeader) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 31f5be53b51..f51cb5bfc25 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -18,7 +18,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/core/rawdb/blockio" - "github.com/ledgerwatch/erigon/eth/consensuschain" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -187,7 +186,12 @@ func HeadersPOW( } TEMP TESTING */ headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, startProgress, cfg.blockReader) - cfg.hd.SetHeaderReader(consensuschain.NewReader(&cfg.chainConfig, tx, cfg.blockReader, logger)) + cfg.hd.SetHeaderReader(&ChainReaderImpl{ + config: &cfg.chainConfig, + tx: tx, + blockReader: cfg.blockReader, + logger: logger, + }) stopped := false var noProgressCounter uint = 0 diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index 5016d4afa02..40b63680e16 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -8,11 +8,10 @@ import ( "time" mapset "github.com/deckarep/golang-set/v2" - "github.com/ledgerwatch/erigon-lib/chain" - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/consensus" @@ -22,6 +21,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" 
"github.com/ledgerwatch/erigon/eth/ethutils" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" ) type MiningBlock struct { @@ -127,7 +127,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if err != nil { return err } - chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader} + chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} var GetBlocksFromHash = func(hash libcommon.Hash, n int) (blocks []*types.Block) { number := rawdb.ReadHeaderNumber(tx, hash) if number == nil { diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 6ec51b72f23..da2d1fb2bdd 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -12,7 +12,6 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv/membatch" state2 "github.com/ledgerwatch/erigon-lib/state" - "github.com/ledgerwatch/erigon/eth/consensuschain" "github.com/ledgerwatch/log/v3" "golang.org/x/net/context" @@ -21,7 +20,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcfg" types2 "github.com/ledgerwatch/erigon-lib/types" - "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" @@ -104,7 +102,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, ctx cont } ibs := state.New(stateReader) - chainReader := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader} + chainReader := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} core.InitializeBlockExecution(cfg.engine, chainReader, current.Header, &cfg.chainConfig, ibs, logger) // Create an empty block based on temporary copied state for @@ -185,7 +183,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, ctx cont } var err error - _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, consensuschain.NewReader(&cfg.chainConfig, tx, cfg.blockReader, logger), true, logger) + _, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader, logger: logger}, true, logger) if err != nil { return err } diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go index 16d90e00667..81cc486e57c 100644 --- a/eth/stagedsync/stage_mining_finish.go +++ b/eth/stagedsync/stage_mining_finish.go @@ -3,14 +3,14 @@ package stagedsync import ( "fmt" - "github.com/ledgerwatch/erigon-lib/chain" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/turbo/builder" - "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/builder" + "github.com/ledgerwatch/erigon/turbo/services" ) type MiningFinishCfg struct { @@ -95,7 +95,7 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit default: logger.Trace("No 
in-flight sealing task.") } - chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader} + chain := ChainReader{Cfg: cfg.chainConfig, Db: tx, BlockReader: cfg.blockReader, Logger: logger} if err := cfg.engine.Seal(chain, block, cfg.miningState.MiningResultCh, cfg.sealCancel); err != nil { logger.Warn("Block sealing failed", "err", err) } diff --git a/eth/stagedsync/stage_txlookup.go b/eth/stagedsync/stage_txlookup.go index 3c0223e3393..197a319099a 100644 --- a/eth/stagedsync/stage_txlookup.go +++ b/eth/stagedsync/stage_txlookup.go @@ -6,14 +6,16 @@ import ( "fmt" "math/big" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/turbo/services" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" @@ -24,7 +26,7 @@ type TxLookupCfg struct { db kv.RwDB prune prune.Mode tmpdir string - borConfig *chain.BorConfig + borConfig *borcfg.BorConfig blockReader services.FullBlockReader } @@ -32,9 +34,14 @@ func StageTxLookupCfg( db kv.RwDB, prune prune.Mode, tmpdir string, - borConfig *chain.BorConfig, + borConfigInterface chain.BorConfig, blockReader services.FullBlockReader, ) TxLookupCfg { + var borConfig *borcfg.BorConfig + if borConfigInterface != nil { + borConfig = borConfigInterface.(*borcfg.BorConfig) + } + return TxLookupCfg{ db: db, prune: prune, @@ -152,7 +159,7 @@ func borTxnLookupTransform(logPrefix string, tx kv.RwTx, blockFrom, blockTo uint blockNumBytes := bigNum.SetUint64(blocknum).Bytes() // we add state sync transactions every bor Sprint amount of blocks - if blocknum%cfg.borConfig.CalculateSprint(blocknum) == 0 && rawdb.HasBorReceipts(tx, blocknum) { + if blocknum%cfg.borConfig.CalculateSprintLength(blocknum) == 0 && rawdb.HasBorReceipts(tx, blocknum) { txnHash := types.ComputeBorTxHash(blocknum, blockHash) if err := next(k, txnHash.Bytes(), blockNumBytes); err != nil { return err diff --git a/eth/stagedsync/stagedsynctest/chain_configs.go b/eth/stagedsync/stagedsynctest/chain_configs.go index 99b81e24442..7be90935113 100644 --- a/eth/stagedsync/stagedsynctest/chain_configs.go +++ b/eth/stagedsync/stagedsynctest/chain_configs.go @@ -2,13 +2,14 @@ package stagedsynctest import ( "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/params" ) func BorDevnetChainConfigWithNoBlockSealDelays() *chain.Config { // take care not to mutate global var (shallow copy) chainConfigCopy := *params.BorDevnetChainConfig - borConfigCopy := *chainConfigCopy.Bor + borConfigCopy := *chainConfigCopy.Bor.(*borcfg.BorConfig) borConfigCopy.Period = map[string]uint64{ "0": 0, } diff --git a/eth/stagedsync/stagedsynctest/harness.go b/eth/stagedsync/stagedsynctest/harness.go index 0b3100d8022..5385c4b8f18 100644 --- a/eth/stagedsync/stagedsynctest/harness.go +++ b/eth/stagedsync/stagedsynctest/harness.go @@ -11,6 +11,8 @@ import ( "testing" "time" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + "github.com/golang/mock/gomock" "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" @@ -90,6 +92,7 @@ func InitHarness(ctx context.Context, t *testing.T, cfg HarnessCfg) Harness { chainDataDB: 
chainDataDB, borConsensusDB: borConsensusDB, chainConfig: cfg.ChainConfig, + borConfig: cfg.ChainConfig.Bor.(*borcfg.BorConfig), blockReader: blockReader, stateSyncStages: stateSyncStages, stateSync: stateSync, @@ -141,6 +144,7 @@ type Harness struct { chainDataDB kv.RwDB borConsensusDB kv.RwDB chainConfig *chain.Config + borConfig *borcfg.BorConfig blockReader services.BlockReader stateSyncStages []*stagedsync.Stage stateSync *stagedsync.Sync @@ -161,6 +165,10 @@ func (h *Harness) Logger() log.Logger { return h.logger } +func (h *Harness) BorConfig() *borcfg.BorConfig { + return h.borConfig +} + func (h *Harness) SaveStageProgress(ctx context.Context, t *testing.T, stageID stages.SyncStage, progress uint64) { rwTx, err := h.chainDataDB.BeginRw(ctx) require.NoError(t, err) @@ -417,8 +425,8 @@ func (h *Harness) consensusEngine(t *testing.T, cfg HarnessCfg) consensus.Engine if h.chainConfig.Bor != nil { genesisContracts := contract.NewGenesisContractsClient( h.chainConfig, - h.chainConfig.Bor.ValidatorContract, - h.chainConfig.Bor.StateReceiverContract, + h.borConfig.ValidatorContract, + h.borConfig.StateReceiverContract, h.logger, ) @@ -568,8 +576,8 @@ func (h *Harness) mockHeimdallClient() { StateSyncEvents(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(_ context.Context, _ uint64, _ int64) ([]*clerk.EventRecordWithTime, error) { h.heimdallLastEventID++ - h.heimdallLastEventHeaderNum += h.chainConfig.Bor.CalculateSprint(h.heimdallLastEventHeaderNum) - stateSyncDelay := h.chainConfig.Bor.CalculateStateSyncDelay(h.heimdallLastEventHeaderNum) + h.heimdallLastEventHeaderNum += h.borConfig.CalculateSprintLength(h.heimdallLastEventHeaderNum) + stateSyncDelay := h.borConfig.CalculateStateSyncDelay(h.heimdallLastEventHeaderNum) newEvent := clerk.EventRecordWithTime{ EventRecord: clerk.EventRecord{ ID: h.heimdallLastEventID, diff --git a/params/config.go b/params/config.go index dc61571ba34..4768abbd450 100644 --- a/params/config.go +++ b/params/config.go @@ -26,6 +26,8 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/chain/networkname" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + "github.com/ledgerwatch/erigon/common/paths" ) @@ -38,12 +40,23 @@ func readChainSpec(filename string) *chain.Config { panic(fmt.Sprintf("Could not open chainspec for %s: %v", filename, err)) } defer f.Close() + decoder := json.NewDecoder(f) spec := &chain.Config{} err = decoder.Decode(&spec) if err != nil { panic(fmt.Sprintf("Could not parse chainspec for %s: %v", filename, err)) } + + if spec.BorJSON != nil { + borConfig := &borcfg.BorConfig{} + err = json.Unmarshal(spec.BorJSON, borConfig) + if err != nil { + panic(fmt.Sprintf("Could not parse 'bor' chainspec for %s: %v", filename, err)) + } + spec.Bor = borConfig + } + return spec } diff --git a/params/version.go b/params/version.go index cb2d12d5bb1..a1b0c0ae15b 100644 --- a/params/version.go +++ b/params/version.go @@ -32,7 +32,7 @@ var ( // see https://calver.org const ( VersionMajor = 2 // Major version component of the current release - VersionMinor = 56 // Minor version component of the current release + VersionMinor = 57 // Minor version component of the current release VersionMicro = 0 // Patch version component of the current release VersionModifier = "dev" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" diff --git a/polygon/sync/canonical_chain_builder.go b/polygon/sync/canonical_chain_builder.go new 
file mode 100644 index 00000000000..9acd3cf07d3 --- /dev/null +++ b/polygon/sync/canonical_chain_builder.go @@ -0,0 +1,244 @@ +package sync + +import ( + "errors" + "fmt" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/core/types" +) + +//go:generate mockgen -destination=./mock/canonical_chain_builder_mock.go -package=mock . CanonicalChainBuilder +type CanonicalChainBuilder interface { + Reset(root *types.Header) + ContainsHash(hash libcommon.Hash) bool + Tip() *types.Header + HeadersInRange(start uint64, count uint64) []*types.Header + Prune(newRootNum uint64) error + Connect(headers []*types.Header) error +} + +type producerSlotIndex uint64 + +type forkTreeNode struct { + parent *forkTreeNode + children map[producerSlotIndex]*forkTreeNode + + header *types.Header + headerHash libcommon.Hash + + totalDifficulty uint64 +} + +type canonicalChainBuilderImpl struct { + root *forkTreeNode + tip *forkTreeNode + + difficultyCalc DifficultyCalculator +} + +func NewCanonicalChainBuilder( + root *types.Header, + difficultyCalc DifficultyCalculator, +) CanonicalChainBuilder { + impl := &canonicalChainBuilderImpl{ + difficultyCalc: difficultyCalc, + } + impl.Reset(root) + return impl +} + +func (impl *canonicalChainBuilderImpl) Reset(root *types.Header) { + impl.root = &forkTreeNode{ + children: make(map[producerSlotIndex]*forkTreeNode), + header: root, + headerHash: root.Hash(), + } + impl.tip = impl.root +} + +// depth-first search +func (impl *canonicalChainBuilderImpl) enumerate(visitFunc func(*forkTreeNode) bool) { + stack := []*forkTreeNode{impl.root} + for len(stack) > 0 { + // pop + node := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if !visitFunc(node) { + break + } + + for _, child := range node.children { + stack = append(stack, child) + } + } +} + +func (impl *canonicalChainBuilderImpl) nodeByHash(hash libcommon.Hash) *forkTreeNode { + var result *forkTreeNode + impl.enumerate(func(node *forkTreeNode) bool { + if node.headerHash == hash { + result = node + } + return result == nil + }) + return result +} + +func (impl *canonicalChainBuilderImpl) ContainsHash(hash libcommon.Hash) bool { + return impl.nodeByHash(hash) != nil +} + +func (impl *canonicalChainBuilderImpl) Tip() *types.Header { + return impl.tip.header +} + +func (impl *canonicalChainBuilderImpl) Headers() []*types.Header { + var headers []*types.Header + node := impl.tip + for node != nil { + headers = append(headers, node.header) + node = node.parent + } + libcommon.SliceReverse(headers) + return headers +} + +func (impl *canonicalChainBuilderImpl) HeadersInRange(start uint64, count uint64) []*types.Header { + headers := impl.Headers() + if len(headers) == 0 { + return nil + } + if headers[0].Number.Uint64() > start { + return nil + } + if headers[len(headers)-1].Number.Uint64() < start+count-1 { + return nil + } + + offset := start - headers[0].Number.Uint64() + return headers[offset : offset+count] +} + +func (impl *canonicalChainBuilderImpl) Prune(newRootNum uint64) error { + if (newRootNum < impl.root.header.Number.Uint64()) || (newRootNum > impl.Tip().Number.Uint64()) { + return errors.New("canonicalChainBuilderImpl.Prune: newRootNum outside of the canonical chain") + } + + newRoot := impl.tip + for newRoot.header.Number.Uint64() > newRootNum { + newRoot = newRoot.parent + } + + impl.root = newRoot + return nil +} + +func (impl *canonicalChainBuilderImpl) updateTipIfNeeded(tipCandidate *forkTreeNode) { + if 
tipCandidate.totalDifficulty > impl.tip.totalDifficulty { + impl.tip = tipCandidate + } + // else if tipCandidate.totalDifficulty == impl.tip.totalDifficulty { + // TODO: is it possible? which one is selected? + // } +} + +func (impl *canonicalChainBuilderImpl) Connect(headers []*types.Header) error { + if (len(headers) > 0) && (headers[0].Number != nil) && (headers[0].Number.Cmp(impl.root.header.Number) == 0) { + headers = headers[1:] + } + if len(headers) == 0 { + return nil + } + + parent := impl.nodeByHash(headers[0].ParentHash) + if parent == nil { + return errors.New("canonicalChainBuilderImpl.Connect: can't connect headers") + } + + headersHashes := libcommon.SliceMap(headers, func(header *types.Header) libcommon.Hash { + return header.Hash() + }) + + // check if headers are linked by ParentHash + for i, header := range headers[1:] { + if header.ParentHash != headersHashes[i] { + return errors.New("canonicalChainBuilderImpl.Connect: invalid headers slice ParentHash") + } + } + + // skip existing matching nodes until a new header is found + for len(headers) > 0 { + var matchingNode *forkTreeNode + for _, c := range parent.children { + if c.headerHash == headersHashes[0] { + matchingNode = c + break + } + } + if matchingNode != nil { + parent = matchingNode + headers = headers[1:] + headersHashes = headersHashes[1:] + } else { + break + } + } + + // if all headers are already inserted + if len(headers) == 0 { + return nil + } + + // attach nodes for the new headers + for i, header := range headers { + if (header.Number == nil) || (header.Number.Uint64() != parent.header.Number.Uint64()+1) { + return errors.New("canonicalChainBuilderImpl.Connect: invalid header.Number") + } + + // TODO: validate using CalcProducerDelay + if header.Time <= parent.header.Time { + return errors.New("canonicalChainBuilderImpl.Connect: invalid header.Time") + } + + if err := bor.ValidateHeaderExtraField(header.Extra); err != nil { + return fmt.Errorf("canonicalChainBuilderImpl.Connect: invalid header.Extra %w", err) + } + + difficulty, err := impl.difficultyCalc.HeaderDifficulty(header) + if err != nil { + return fmt.Errorf("canonicalChainBuilderImpl.Connect: header difficulty error %w", err) + } + if (header.Difficulty == nil) || (header.Difficulty.Uint64() != difficulty) { + return &bor.WrongDifficultyError{ + Number: header.Number.Uint64(), + Expected: difficulty, + Actual: header.Difficulty.Uint64(), + Signer: []byte{}, + } + } + + slot := producerSlotIndex(difficulty) + if _, ok := parent.children[slot]; ok { + return errors.New("canonicalChainBuilderImpl.Connect: producer slot is already filled by a different header") + } + + node := &forkTreeNode{ + parent: parent, + children: make(map[producerSlotIndex]*forkTreeNode), + + header: header, + headerHash: headersHashes[i], + + totalDifficulty: parent.totalDifficulty + difficulty, + } + + parent.children[slot] = node + parent = node + impl.updateTipIfNeeded(node) + } + + return nil +} diff --git a/polygon/sync/canonical_chain_builder_test.go b/polygon/sync/canonical_chain_builder_test.go new file mode 100644 index 00000000000..aa640f3c479 --- /dev/null +++ b/polygon/sync/canonical_chain_builder_test.go @@ -0,0 +1,23 @@ +package sync + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/core/types" +) + +type testDifficultyCalculator struct { +} + +func (*testDifficultyCalculator) HeaderDifficulty(*types.Header) (uint64, error) { + return 0, nil +} + +func TestCanonicalChainBuilderConnectEmpty(t *testing.T) { 
+ difficultyCalc := testDifficultyCalculator{} + builder := NewCanonicalChainBuilder(new(types.Header), &difficultyCalc) + err := builder.Connect([]*types.Header{}) + require.Nil(t, err) +} diff --git a/polygon/sync/difficulty.go b/polygon/sync/difficulty.go new file mode 100644 index 00000000000..7a6895c506c --- /dev/null +++ b/polygon/sync/difficulty.go @@ -0,0 +1,60 @@ +package sync + +import ( + lru "github.com/hashicorp/golang-lru/arc/v2" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/log/v3" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + heimdallspan "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" + "github.com/ledgerwatch/erigon/consensus/bor/valset" + "github.com/ledgerwatch/erigon/core/types" +) + +type DifficultyCalculator interface { + HeaderDifficulty(header *types.Header) (uint64, error) +} + +type difficultyCalculatorImpl struct { + borConfig *borcfg.BorConfig + span *heimdallspan.HeimdallSpan + signaturesCache *lru.ARCCache[libcommon.Hash, libcommon.Address] + + log log.Logger +} + +func NewDifficultyCalculator( + borConfig *borcfg.BorConfig, + span *heimdallspan.HeimdallSpan, + log log.Logger, +) DifficultyCalculator { + signaturesCache, err := lru.NewARC[libcommon.Hash, libcommon.Address](stagedsync.InMemorySignatures) + if err != nil { + panic(err) + } + return &difficultyCalculatorImpl{ + borConfig: borConfig, + span: span, + signaturesCache: signaturesCache, + + log: log, + } +} + +func (impl *difficultyCalculatorImpl) HeaderDifficulty(header *types.Header) (uint64, error) { + signer, err := bor.Ecrecover(header, impl.signaturesCache, impl.borConfig) + if err != nil { + return 0, err + } + + validatorSet := valset.NewValidatorSet(impl.span.ValidatorSet.Validators, log.New()) + + sprintCount := impl.borConfig.CalculateSprintNumber(header.Number.Uint64()) + if sprintCount > 0 { + validatorSet.IncrementProposerPriority(int(sprintCount), impl.log) + } + + return validatorSet.Difficulty(signer) +} diff --git a/polygon/sync/difficulty_test.go b/polygon/sync/difficulty_test.go new file mode 100644 index 00000000000..45d746958ab --- /dev/null +++ b/polygon/sync/difficulty_test.go @@ -0,0 +1,22 @@ +package sync + +import ( + "testing" + + "github.com/ledgerwatch/log/v3" + + "github.com/stretchr/testify/require" + + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + heimdallspan "github.com/ledgerwatch/erigon/consensus/bor/heimdall/span" + "github.com/ledgerwatch/erigon/core/types" +) + +func TestHeaderDifficultyNoSignature(t *testing.T) { + borConfig := borcfg.BorConfig{} + span := heimdallspan.HeimdallSpan{} + logger := log.New() + calc := NewDifficultyCalculator(&borConfig, &span, logger) + _, err := calc.HeaderDifficulty(new(types.Header)) + require.ErrorContains(t, err, "signature suffix missing") +} diff --git a/polygon/sync/heimdall.go b/polygon/sync/heimdall.go index f1addfad9e5..cdf8077b9ac 100644 --- a/polygon/sync/heimdall.go +++ b/polygon/sync/heimdall.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/consensus/bor/heimdall" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/checkpoint" "github.com/ledgerwatch/erigon/consensus/bor/heimdall/milestone" @@ -61,12 +62,6 @@ func cmpBlockNumToMilestoneRange(n uint64, m *milestone.Milestone) int { return cmpNumToRange(n, m.StartBlock, m.EndBlock) } -func reverse[T any](s []T) { - 
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} - func (impl *HeimdallImpl) FetchCheckpoints(ctx context.Context, start uint64) ([]*checkpoint.Checkpoint, error) { count, err := impl.client.FetchCheckpointCount(ctx) if err != nil { @@ -95,7 +90,7 @@ func (impl *HeimdallImpl) FetchCheckpoints(ctx context.Context, start uint64) ([ } } - reverse(checkpoints) + common.SliceReverse(checkpoints) return checkpoints, nil } @@ -111,7 +106,7 @@ func (impl *HeimdallImpl) FetchMilestones(ctx context.Context, start uint64) ([] m, err := impl.client.FetchMilestone(ctx, i) if err != nil { if errors.Is(err, heimdall.ErrNotInMilestoneList) { - reverse(milestones) + common.SliceReverse(milestones) return milestones, ErrIncompleteMilestoneRange } return nil, err @@ -131,7 +126,7 @@ func (impl *HeimdallImpl) FetchMilestones(ctx context.Context, start uint64) ([] } } - reverse(milestones) + common.SliceReverse(milestones) return milestones, nil } diff --git a/polygon/sync/mock/canonical_chain_builder_mock.go b/polygon/sync/mock/canonical_chain_builder_mock.go new file mode 100644 index 00000000000..2985c446798 --- /dev/null +++ b/polygon/sync/mock/canonical_chain_builder_mock.go @@ -0,0 +1,118 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/polygon/sync (interfaces: CanonicalChainBuilder) + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + common "github.com/ledgerwatch/erigon-lib/common" + types "github.com/ledgerwatch/erigon/core/types" +) + +// MockCanonicalChainBuilder is a mock of CanonicalChainBuilder interface. +type MockCanonicalChainBuilder struct { + ctrl *gomock.Controller + recorder *MockCanonicalChainBuilderMockRecorder +} + +// MockCanonicalChainBuilderMockRecorder is the mock recorder for MockCanonicalChainBuilder. +type MockCanonicalChainBuilderMockRecorder struct { + mock *MockCanonicalChainBuilder +} + +// NewMockCanonicalChainBuilder creates a new mock instance. +func NewMockCanonicalChainBuilder(ctrl *gomock.Controller) *MockCanonicalChainBuilder { + mock := &MockCanonicalChainBuilder{ctrl: ctrl} + mock.recorder = &MockCanonicalChainBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCanonicalChainBuilder) EXPECT() *MockCanonicalChainBuilderMockRecorder { + return m.recorder +} + +// Connect mocks base method. +func (m *MockCanonicalChainBuilder) Connect(arg0 []*types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connect", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Connect indicates an expected call of Connect. +func (mr *MockCanonicalChainBuilderMockRecorder) Connect(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Connect), arg0) +} + +// ContainsHash mocks base method. +func (m *MockCanonicalChainBuilder) ContainsHash(arg0 common.Hash) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContainsHash", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// ContainsHash indicates an expected call of ContainsHash. 
+func (mr *MockCanonicalChainBuilderMockRecorder) ContainsHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsHash", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).ContainsHash), arg0) +} + +// HeadersInRange mocks base method. +func (m *MockCanonicalChainBuilder) HeadersInRange(arg0, arg1 uint64) []*types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadersInRange", arg0, arg1) + ret0, _ := ret[0].([]*types.Header) + return ret0 +} + +// HeadersInRange indicates an expected call of HeadersInRange. +func (mr *MockCanonicalChainBuilderMockRecorder) HeadersInRange(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadersInRange", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).HeadersInRange), arg0, arg1) +} + +// Prune mocks base method. +func (m *MockCanonicalChainBuilder) Prune(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Prune", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Prune indicates an expected call of Prune. +func (mr *MockCanonicalChainBuilderMockRecorder) Prune(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Prune), arg0) +} + +// Reset mocks base method. +func (m *MockCanonicalChainBuilder) Reset(arg0 *types.Header) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Reset", arg0) +} + +// Reset indicates an expected call of Reset. +func (mr *MockCanonicalChainBuilderMockRecorder) Reset(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Reset), arg0) +} + +// Tip mocks base method. +func (m *MockCanonicalChainBuilder) Tip() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Tip") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// Tip indicates an expected call of Tip. 
+func (mr *MockCanonicalChainBuilderMockRecorder) Tip() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tip", reflect.TypeOf((*MockCanonicalChainBuilder)(nil).Tip)) +} diff --git a/tests/bor/helper/miner.go b/tests/bor/helper/miner.go index 4850059e500..6ddd7f1b117 100644 --- a/tests/bor/helper/miner.go +++ b/tests/bor/helper/miner.go @@ -4,10 +4,13 @@ import ( "context" "crypto/ecdsa" "encoding/json" + "fmt" "math/big" "os" "time" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/direct" @@ -28,7 +31,6 @@ import ( // InitGenesis initializes genesis file from json with sprint size and chain name as configurable inputs func InitGenesis(fileLocation string, sprintSize uint64, chainName string) types.Genesis { - // sprint size = 8 in genesis genesisData, err := os.ReadFile(fileLocation) if err != nil { @@ -36,14 +38,23 @@ func InitGenesis(fileLocation string, sprintSize uint64, chainName string) types } genesis := &types.Genesis{} - if err := json.Unmarshal(genesisData, genesis); err != nil { panic(err) } - genesis.Config.Bor.Sprint["0"] = sprintSize genesis.Config.ChainName = chainName + if genesis.Config.BorJSON != nil { + borConfig := &borcfg.BorConfig{} + err = json.Unmarshal(genesis.Config.BorJSON, borConfig) + if err != nil { + panic(fmt.Sprintf("Could not parse 'bor' config for %s: %v", fileLocation, err)) + } + + borConfig.Sprint["0"] = sprintSize + genesis.Config.Bor = borConfig + } + return *genesis } diff --git a/turbo/jsonrpc/bor_helper.go b/turbo/jsonrpc/bor_helper.go index c4a14e1b119..34a5ec35f5d 100644 --- a/turbo/jsonrpc/bor_helper.go +++ b/turbo/jsonrpc/bor_helper.go @@ -7,9 +7,9 @@ import ( "fmt" "sort" - "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus/bor" "github.com/ledgerwatch/erigon/consensus/bor/valset" @@ -89,7 +89,7 @@ func getHeaderByHash(ctx context.Context, api *BorImpl, tx kv.Tx, hash common.Ha } // ecrecover extracts the Ethereum account address from a signed header. -func ecrecover(header *types.Header, c *chain.BorConfig) (common.Address, error) { +func ecrecover(header *types.Header, c *borcfg.BorConfig) (common.Address, error) { // Retrieve the signature from the header extra-data if len(header.Extra) < extraSeal { return common.Address{}, errMissingSignature @@ -146,8 +146,8 @@ func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*valset.Val // author returns the Ethereum address recovered // from the signature in the header's extra-data section. 
func author(api *BorImpl, tx kv.Tx, header *types.Header) (common.Address, error) { - config, _ := api.chainConfig(tx) - return ecrecover(header, config.Bor) + borEngine, _ := api.bor() + return ecrecover(header, borEngine.Config()) } func rankMapDifficulties(values map[common.Address]uint64) []difficultiesKV { diff --git a/turbo/jsonrpc/bor_snapshot.go b/turbo/jsonrpc/bor_snapshot.go index 7a6ef67f4c8..0fb6389c3c2 100644 --- a/turbo/jsonrpc/bor_snapshot.go +++ b/turbo/jsonrpc/bor_snapshot.go @@ -6,10 +6,11 @@ import ( "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/bor" @@ -21,7 +22,7 @@ import ( ) type Snapshot struct { - config *chain.BorConfig // Consensus engine parameters to fine tune behavior + config *borcfg.BorConfig // Consensus engine parameters to fine tune behavior Number uint64 `json:"number"` // Block number where the snapshot was created Hash common.Hash `json:"hash"` // Block hash where the snapshot was created @@ -526,7 +527,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { number := header.Number.Uint64() // Delete the oldest signer from the recent list to allow it signing again - currentSprint := s.config.CalculateSprint(number) + currentSprint := s.config.CalculateSprintLength(number) if number >= currentSprint { delete(snap.Recents, number-currentSprint) } @@ -628,8 +629,8 @@ func loadSnapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, hash common.Hash) (*Snaps if err := json.Unmarshal(blob, snap); err != nil { return nil, err } - config, _ := api.chainConfig(db) - snap.config = config.Bor + borEngine, _ := api.bor() + snap.config = borEngine.Config() // update total voting power if err := snap.ValidatorSet.UpdateTotalVotingPower(); err != nil { diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index e014526f78d..3063fee6578 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -8,11 +8,13 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon/cl/clparams" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + + "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" @@ -294,7 +296,8 @@ func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNu response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, borTx, borTxHash, additionalFields) if chainConfig.Bor != nil { - response["miner"], _ = ecrecover(block.Header(), chainConfig.Bor) + borConfig := chainConfig.Bor.(*borcfg.BorConfig) + response["miner"], _ = ecrecover(block.Header(), borConfig) } if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() { diff --git a/turbo/snapshotsync/freezeblocks/dump_test.go b/turbo/snapshotsync/freezeblocks/dump_test.go index 9304b73462e..18a826a9d19 100644 --- a/turbo/snapshotsync/freezeblocks/dump_test.go +++ b/turbo/snapshotsync/freezeblocks/dump_test.go @@ -5,6 +5,8 @@ import ( "runtime" "testing" + "github.com/ledgerwatch/erigon/consensus/bor/borcfg" + "github.com/holiman/uint256" 
"github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" @@ -56,9 +58,9 @@ func TestDump(t *testing.T) { } withConfig := func(config chain.Config, sprints map[string]uint64) *chain.Config { - bor := *config.Bor + bor := *config.Bor.(*borcfg.BorConfig) + bor.Sprint = sprints config.Bor = &bor - config.Bor.Sprint = sprints return &config }