Implement EIP7805: Fork-choice enforced Inclusion Lists #14754

Open · wants to merge 1 commit into base: develop
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -21,6 +21,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
- DB optimization for saving light client bootstraps (save unique sync committees only).
- Trace IDONTWANT Messages in Pubsub.
- Add Fulu fork boilerplate.
- FOCIL (EIP-7805: Fork-choice enforced Inclusion Lists).

### Changed

1 change: 1 addition & 0 deletions beacon-chain/blockchain/BUILD.bazel
@@ -12,6 +12,7 @@ go_library(
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",
"inclusion_list.go",
"init_sync_process_block.go",
"log.go",
"merge_ascii_art.go",
34 changes: 34 additions & 0 deletions beacon-chain/blockchain/chain_info.go
@@ -69,6 +69,7 @@ type GenesisFetcher interface {
type HeadFetcher interface {
HeadSlot() primitives.Slot
HeadRoot(ctx context.Context) ([]byte, error)
FilteredHeadRoot(ctx context.Context) ([32]byte, error)
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
@@ -180,6 +181,39 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
return r[:], nil
}

// FilteredHeadRoot returns the filtered head root of the chain.
// If the head root does not satisfy the inclusion list constraint,
// its parent root is returned.
func (s *Service) FilteredHeadRoot(ctx context.Context) ([32]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()

if s.head != nil && s.head.root != params.BeaconConfig().ZeroHash {
if s.head.root == s.badInclusionListBlock {
return s.head.block.Block().ParentRoot(), nil
}
return s.head.root, nil
}

headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return [32]byte{}, err
}
if headBlock == nil || headBlock.IsNil() {
return params.BeaconConfig().ZeroHash, nil
}

root, err := headBlock.Block().HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
if root == s.badInclusionListBlock {
return headBlock.Block().ParentRoot(), nil
}

return root, nil
}

// HeadBlock returns the head block of the chain.
// If the head is nil from service struct,
// it will attempt to get the head block from DB.
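For illustration, a hedged sketch of how a consumer could call the new FilteredHeadRoot method through the HeadFetcher interface extended above; logFilteredHead is a hypothetical helper and not part of this PR:

package example

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
)

// logFilteredHead asks any HeadFetcher (e.g. the chain Service) for the
// FOCIL-filtered head root. When the current head block failed the
// inclusion-list constraint, the returned root is its parent root.
func logFilteredHead(ctx context.Context, fetcher blockchain.HeadFetcher) error {
	root, err := fetcher.FilteredHeadRoot(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("filtered head root: %#x\n", root)
	return nil
}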
19 changes: 18 additions & 1 deletion beacon-chain/blockchain/execution_engine.go
@@ -269,12 +269,29 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
return false, errors.Wrap(err, "could not get execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

txs := make([][]byte, 0)
// Post-FOCIL, only consider the inclusion list constraint if it matches the current slot.
if slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().FocilForkEpoch && s.CurrentSlot() == blk.Block().Slot() {
txs = s.inclusionListCache.Get(blk.Block().Slot())
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests, txs)

switch {
case err == nil:
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrBadInclusionListPayloadStatus):
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"parentRoot": fmt.Sprintf("%#x", parentRoot),
}).Info("Called new payload but inclusion list didn't satisfy")
r, err := blk.Block().HashTreeRoot()
if err != nil {
return false, errors.Wrap(err, "could not get block hash tree root")
}
s.badInclusionListBlock = r // Cache the block root that fails to satisfy the inclusion list constraint.

Review comment: Is it sufficient to cache only the latest badInclusionListBlock?

Author reply: I think so.

return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
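As a reading aid, the new guard in notifyNewPayload can be expressed as a small predicate. This is a hypothetical extraction (shouldEnforceInclusionList does not exist in the PR), assuming the FocilForkEpoch config field referenced above:

package blockchain

import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// shouldEnforceInclusionList mirrors the condition above: inclusion-list
// transactions are only passed to NewPayload once the FOCIL fork is active
// and only for a block from the current slot.
func shouldEnforceInclusionList(currentSlot, blockSlot primitives.Slot) bool {
	return slots.ToEpoch(currentSlot) >= params.BeaconConfig().FocilForkEpoch &&
		currentSlot == blockSlot
}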
59 changes: 59 additions & 0 deletions beacon-chain/blockchain/inclusion_list.go
@@ -0,0 +1,59 @@
package blockchain

import (
"context"
"time"

"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

const updateInclusionListBlockInterval = time.Second

// Routine that updates block building with inclusion lists one second before the next slot starts.
func (s *Service) updateBlockWithInclusionListRoutine() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Failed to wait for initial sync")
return
}

interval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - updateInclusionListBlockInterval
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{interval})

for {
select {
case <-s.ctx.Done():
return
case <-ticker.C():
s.updateBlockWithInclusionList(context.Background())
}
}
}

// Updates block building with the inclusion lists for the current slot, swapping the cached payload ID for the new payload ID returned by the execution engine.
func (s *Service) updateBlockWithInclusionList(ctx context.Context) {
currentSlot := s.CurrentSlot()

// Skip update if not in or past the FOCIL fork epoch.
if slots.ToEpoch(currentSlot) < params.BeaconConfig().FocilForkEpoch {
return
}

s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()

headRoot := s.headRoot()
id, found := s.cfg.PayloadIDCache.PayloadID(currentSlot+1, headRoot)
if !found {
return
}

txs := s.inclusionListCache.Get(currentSlot)
newID, err := s.cfg.ExecutionEngineCaller.UpdatePayloadWithInclusionList(ctx, id, txs)
if err != nil {
log.WithError(err).Error("Failed to update block with inclusion list")
return
}

s.cfg.PayloadIDCache.Set(currentSlot+1, headRoot, *newID)
}
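As a worked example of the tick offset used by the routine above (assuming mainnet's 12-second slots; the constants below are stand-ins for params.BeaconConfig().SecondsPerSlot and updateInclusionListBlockInterval):

package main

import (
	"fmt"
	"time"
)

func main() {
	const secondsPerSlot = 12     // stand-in for params.BeaconConfig().SecondsPerSlot
	updateInterval := time.Second // stand-in for updateInclusionListBlockInterval

	// The ticker fires this far into every slot: 11s, i.e. one second before the
	// next slot begins, which is when the next proposer's payload gets the inclusion list.
	offset := time.Second*secondsPerSlot - updateInterval
	fmt.Println(offset) // prints 11s
}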
39 changes: 21 additions & 18 deletions beacon-chain/blockchain/service.go
@@ -47,24 +47,26 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
inclusionListCache *cache.InclusionLists
badInclusionListBlock [32]byte
}

// config options for the service.
Expand Down Expand Up @@ -215,6 +217,7 @@ func (s *Service) Start() {
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
go s.updateBlockWithInclusionListRoutine()

Review comment: What makes it better to have a routine here instead of updating a payload when a validator has a proposer role? Easier implementation?

Author reply: The routine is a no-op unless the validator has a proposer role regardless.

}

// Stop the blockchain service's main event loop and associated goroutines.
7 changes: 7 additions & 0 deletions beacon-chain/blockchain/testing/mock.go
@@ -348,6 +348,13 @@ func (s *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
return make([]byte, 32), nil
}

// FilteredHeadRoot mocks FilteredHeadRoot method in chain service.
func (s *ChainService) FilteredHeadRoot(_ context.Context) ([32]byte, error) {
if len(s.Root) > 0 {
return bytesutil.ToBytes32(s.Root), nil
}
return [32]byte{}, nil
}

// HeadBlock mocks HeadBlock method in chain service.
func (s *ChainService) HeadBlock(context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
return s.Block, nil
2 changes: 2 additions & 0 deletions beacon-chain/cache/BUILD.bazel
@@ -15,6 +15,7 @@ go_library(
"common.go",
"doc.go",
"error.go",
"inclusion_list.go",
"interfaces.go",
"payload_id.go",
"proposer_indices.go",
@@ -75,6 +76,7 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"inclusion_list_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
98 changes: 98 additions & 0 deletions beacon-chain/cache/inclusion_list.go
@@ -0,0 +1,98 @@
package cache

Review comment: Can we have unit tests? InclusionLists deserves them.

import (
"crypto/sha256"
"sync"

"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

type InclusionLists struct {
mu sync.RWMutex
ils map[primitives.Slot]map[primitives.ValidatorIndex]struct {
txs [][]byte
seenTwice bool
}
}

// NewInclusionLists initializes a new InclusionLists instance.
func NewInclusionLists() *InclusionLists {
return &InclusionLists{
ils: make(map[primitives.Slot]map[primitives.ValidatorIndex]struct {
txs [][]byte
seenTwice bool
}),
}
}

// Add adds a set of transactions for a specific slot and validator index.
func (i *InclusionLists) Add(slot primitives.Slot, validatorIndex primitives.ValidatorIndex, txs [][]byte) {
i.mu.Lock()
defer i.mu.Unlock()

if _, ok := i.ils[slot]; !ok {
i.ils[slot] = make(map[primitives.ValidatorIndex]struct {
txs [][]byte
seenTwice bool
})
}

entry := i.ils[slot][validatorIndex]
if entry.seenTwice {
return // No need to modify if already marked as seen twice.
}

if entry.txs == nil {
entry.txs = txs
} else {
entry.seenTwice = true

Review comment: Is there any chance the same IL is received from different peers?

Author reply (terencechain, Dec 31, 2024): libp2p covers that case; it filters by content, so we should not import the same IL from different peers.

entry.txs = nil // Clear transactions to save space if seen twice.
}
i.ils[slot][validatorIndex] = entry
}

// Get retrieves unique transactions for a specific slot.
func (i *InclusionLists) Get(slot primitives.Slot) [][]byte {
i.mu.RLock()
defer i.mu.RUnlock()

ils, exists := i.ils[slot]
if !exists {
return nil
}

var uniqueTxs [][]byte
seen := make(map[[32]byte]struct{})
for _, entry := range ils {
for _, tx := range entry.txs {
hash := sha256.Sum256(tx)
if _, duplicate := seen[hash]; !duplicate {
uniqueTxs = append(uniqueTxs, tx)
seen[hash] = struct{}{}
}
}
}
return uniqueTxs
}

// Delete removes all inclusion lists for a specific slot.
func (i *InclusionLists) Delete(slot primitives.Slot) {
i.mu.Lock()
defer i.mu.Unlock()

delete(i.ils, slot)
}

// SeenTwice checks if a validator's transactions were marked as seen twice for a specific slot.
func (i *InclusionLists) SeenTwice(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
i.mu.RLock()
defer i.mu.RUnlock()

ils, exists := i.ils[slot]
if !exists {
return false
}

entry, exists := ils[idx]
return exists && entry.seenTwice
}
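Following up on the unit-test ask above, a minimal sketch of what a test for the equivocation path could look like (an assumption of shape only; the inclusion_list_test.go registered in BUILD.bazel is not shown in this diff):

package cache_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// A second Add for the same slot and validator index marks the entry as seen
// twice and drops its transactions from Get.
func TestInclusionLists_SeenTwice(t *testing.T) {
	c := cache.NewInclusionLists()
	slot := primitives.Slot(1)
	idx := primitives.ValidatorIndex(7)

	c.Add(slot, idx, [][]byte{{0x01}, {0x02}})
	if c.SeenTwice(slot, idx) {
		t.Fatal("entry should not be marked seen twice after a single Add")
	}
	if got := len(c.Get(slot)); got != 2 {
		t.Fatalf("expected 2 unique txs, got %d", got)
	}

	// A second IL from the same validator is treated as equivocation: its
	// transactions are cleared and excluded from Get.
	c.Add(slot, idx, [][]byte{{0x03}})
	if !c.SeenTwice(slot, idx) {
		t.Fatal("entry should be marked seen twice after a second Add")
	}
	if got := len(c.Get(slot)); got != 0 {
		t.Fatalf("expected 0 txs after equivocation, got %d", got)
	}

	c.Delete(slot)
	if c.Get(slot) != nil {
		t.Fatal("expected no inclusion lists after Delete")
	}
}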