diff --git a/README.md b/README.md
index baa3f56..5591a7b 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,3 @@
-![build](https://github.com/BlocSoc-iitr/selene/actions/workflows/go.yml/badge.svg)
![tests](https://github.com/BlocSoc-iitr/selene/actions/workflows/test.yml/badge.svg)
![linter](https://github.com/BlocSoc-iitr/selene/actions/workflows/cilint.yml/badge.svg)
@@ -7,6 +6,41 @@
Selene is a fast, open source, portable & secure light client for Ethereum written in Golang. We plan to ship Selene as the underlying software behind wallets that use light clients. We derived our inspiration from [Helios](https://github.com/a16z/helios) which is a light client written in Rust. The project is in active maintenance on the [dev](https://github.com/BlocSoc-iitr/selene/tree/dev) branch.
# Architecture
+![Selene Architecture](https://github.com/user-attachments/assets/db7eb9d7-5bc3-4911-a849-1b2d05239942)
+## Data Flow
+
+1. The Consensus Layer provides sync information and verified block headers to the RPC Server.
+2. The RPC Server passes block information and verified headers to the Execution Layer.
+3. The Execution Layer validates Merkle proofs based on the state root and requested data.
+
+## Centralised RPC Server
+This server acts as an intermediary between the Consensus and Execution layers. It handles:
+
+* Providing block headers of previous blocks from checkpoint to the latest block
+* Relaying gossip about the latest block head
+* Passing verified block headers to the Execution Layer
+
+## Execution Layer
+The Execution Layer is responsible for processing transactions and maintaining the current state of the blockchain. It includes:
+
+A `Validate Merkle Proofs` field that:
+
+* Takes state root and requested data as input
+* Outputs a boolean (true/false) indicating the validity of the Merkle proof
+
+## Consensus Layer
+
+The Consensus Layer is responsible for maintaining agreement on the state of the blockchain across the network. It includes:
+
+* Getting weak subjectivity checkpoints
+* Logic for determining **current and next sync committees**
+* A **Syncing** process that:
+ * Uses sync committee makeup to fetch previous block headers
+ * Syncs for each sync committee period (~27 hours) up to the latest block
+* A **verify bls sig** function that:
+ * Takes `blsaggsig` and `blspubkey[]` as input
+  * Verifies a BLS aggregate signature: it accepts the aggregated signature (`blsaggsig`) and an array of public keys (`blspubkey[]`), and returns a boolean
+    indicating whether the signature is valid.
# Installing
Yet to come.
diff --git a/common/types.go b/common/types.go
index 30f3303..77fabeb 100644
--- a/common/types.go
+++ b/common/types.go
@@ -1,19 +1,16 @@
package common
-//other option is to import a package of go-ethereum but that was weird
import (
"encoding/json"
"fmt"
+ "math/big"
+ "strconv"
+
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
- "strconv"
)
-// need to confirm how such primitive types will be imported,
-//
-// Transaction https://docs.rs/alloy/latest/alloy/rpc/types/struct.Transaction.html
-// address: 20 bytes
-// B256: 32 bytes https://bluealloy.github.io/revm/docs/revm/precompile/primitives/type.B256.html
type Address struct {
Addr [20]byte
}
@@ -40,29 +37,65 @@ type Block struct {
Transactions Transactions
TransactionsRoot [32]byte
Uncles [][32]byte
+ BlobGasUsed *uint64
+ ExcessBlobGas *uint64
}
-// an enum having 2 types- how to implement??
type Transactions struct {
Hashes [][32]byte
- Full []types.Transaction //transaction needs to be defined
+	Full   []Transaction // full transaction objects; see the Transaction struct below
+}
+
+type Transaction struct {
+ AccessList types.AccessList
+ Hash common.Hash
+ Nonce uint64
+ BlockHash [32]byte
+ BlockNumber *uint64
+ TransactionIndex uint64
+ From string
+ To *common.Address
+ Value *big.Int
+ GasPrice *big.Int
+ Gas uint64
+ Input []byte
+ ChainID *big.Int
+ TransactionType uint8
+ Signature *Signature
+ MaxFeePerGas *big.Int
+ MaxPriorityFeePerGas *big.Int
+ MaxFeePerBlobGas *big.Int
+ BlobVersionedHashes []common.Hash
+}
+
+type Signature struct {
+ R string
+ S string
+ V uint64
+ YParity Parity
+}
+
+type Parity struct {
+ Value bool
}
func Default() *Transactions {
return &Transactions{
- Full: []types.Transaction{},
+ Full: []Transaction{},
}
}
+
func (t *Transactions) HashesFunc() [][32]byte {
- if len(t.Hashes) > 0 { //if Transactions struct contains hashes then return them directly
+ if len(t.Hashes) > 0 {
return t.Hashes
}
hashes := make([][32]byte, len(t.Full))
for i := range t.Full {
- hashes[i] = t.Full[i].Hash()
+ hashes[i] = t.Full[i].Hash // Use the Hash field directly
}
return hashes
}
+
func (t Transactions) MarshalJSON() ([]byte, error) {
if len(t.Hashes) > 0 {
return json.Marshal(t.Hashes)
@@ -70,7 +103,6 @@ func (t Transactions) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Full)
}
-// can be an enum
type BlockTag struct {
Latest bool
Finalized bool
@@ -106,18 +138,21 @@ func (b *BlockTag) UnmarshalJSON(data []byte) error {
}
return nil
}
+
func parseBlockNumber(block string) (uint64, error) {
if len(block) > 2 && block[:2] == "0x" {
return parseHexUint64(block[2:])
}
return parseDecimalUint64(block)
}
+
func parseHexUint64(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 64)
}
+
func parseDecimalUint64(decStr string) (uint64, error) {
return strconv.ParseUint(decStr, 10, 64)
}
-// need some error structs and enums as well
-// Example BlockNotFoundError
+// Example error structs can be defined here
+// type BlockNotFoundError struct {}
diff --git a/common/utils.go b/common/utils.go
deleted file mode 100644
index deb1423..0000000
--- a/common/utils.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package common
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-// if we need to export the functions , just make their first letter capitalised
-func Hex_str_to_bytes(s string) ([]byte, error) {
- s = strings.TrimPrefix(s, "0x")
-
- bytesArray, err := hex.DecodeString(s)
-
- if err != nil {
- return nil, err
- }
-
- return bytesArray, nil
-}
-
-func Address_to_hex_string(addr common.Address) string {
- bytesArray := addr.Bytes()
- return fmt.Sprintf("0x%x", hex.EncodeToString(bytesArray))
-}
-
-func U64_to_hex_string(val uint64) string {
- return fmt.Sprintf("0x%x", val)
-}
-
-func Bytes_deserialize(data []byte) ([]byte, error) {
- var hexString string
- if err := json.Unmarshal(data, &hexString); err != nil {
- return nil, err
- }
-
- return Hex_str_to_bytes(hexString)
-}
-
-func Bytes_serialize(bytes []byte) ([]byte, error) {
- if bytes == nil {
- return json.Marshal(nil)
- }
- hexString := hex.EncodeToString(bytes)
- return json.Marshal(hexString)
-}
diff --git a/config/base.go b/config/base.go
index 98afea0..d3419ba 100644
--- a/config/base.go
+++ b/config/base.go
@@ -1,18 +1,20 @@
package config
+import "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+
// base config for a network
type BaseConfig struct {
- RpcBindIp string `json:"rpc_bind_ip"`
- RpcPort uint16 `json:"rpc_port"`
- ConsensusRpc *string `json:"consensus_rpc"`
- DefaultCheckpoint [32]byte `json:"default_checkpoint"` // In cli.go, checkpoint is currently taken as []byte{}
- Chain ChainConfig `json:"chain"` // but it should be [32]byte as it is a hash
- Forks Forks `json:"forks"`
- MaxCheckpointAge uint64 `json:"max_checkpoint_age"`
- DataDir *string `json:"data_dir"`
- LoadExternalFallback bool `json:"load_external_fallback"`
- StrictCheckpointAge bool `json:"strict_checkpoint_age"`
+ RpcBindIp string `json:"rpc_bind_ip"`
+ RpcPort uint16 `json:"rpc_port"`
+ ConsensusRpc *string `json:"consensus_rpc"`
+ DefaultCheckpoint [32]byte `json:"default_checkpoint"` // In cli.go, checkpoint is currently taken as []byte{}
+ Chain ChainConfig `json:"chain"` // but it should be [32]byte as it is a hash
+ Forks consensus_core.Forks `json:"forks"`
+ MaxCheckpointAge uint64 `json:"max_checkpoint_age"`
+ DataDir *string `json:"data_dir"`
+ LoadExternalFallback bool `json:"load_external_fallback"`
+ StrictCheckpointAge bool `json:"strict_checkpoint_age"`
}
// implement a default method for the above struct
@@ -24,10 +26,10 @@ func (b BaseConfig) Default() BaseConfig {
ConsensusRpc: nil,
DefaultCheckpoint: [32]byte{},
Chain: ChainConfig{},
- Forks: Forks{},
+ Forks: consensus_core.Forks{},
MaxCheckpointAge: 0,
DataDir: nil,
LoadExternalFallback: false,
StrictCheckpointAge: false,
}
-}
\ No newline at end of file
+}
diff --git a/config/checkpoints/checkpoints.go b/config/checkpoints/checkpoints.go
index 1ac7450..6ac2003 100644
--- a/config/checkpoints/checkpoints.go
+++ b/config/checkpoints/checkpoints.go
@@ -8,7 +8,9 @@ import (
"strconv"
"strings"
"sync"
-
+ "context"
+ "time"
+ "log"
"github.com/BlocSoc-iitr/selene/config"
"github.com/avast/retry-go"
"gopkg.in/yaml.v2"
@@ -107,23 +109,26 @@ func Get(url string) (*http.Response, error) {
// @param data: []byte - data to deserialize
// @return *uint64, error
// Deserializes the given data to uint64
-func DeserializeSlot(data []byte) (*uint64, error) {
- var s string
- if err := json.Unmarshal(data, &s); err != nil {
+func DeserializeSlot(input []byte) (*uint64, error) {
+ var value interface{}
+ if err := json.Unmarshal(input, &value); err != nil {
return nil, err
}
- if len(s) > 1 && s[0] == '"' && s[len(s)-1] == '"' {
- s = s[1 : len(s)-1]
- }
-
- value, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
+ switch v := value.(type) {
+ case string:
+ // Try to parse a string as a number
+ num, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid string format: %s", v)
+ }
+ return &num, nil
+ case float64:
+ num := uint64(v)
+ return &num, nil
+ default:
+ return nil, fmt.Errorf("unexpected type: %T", v)
}
-
- return &value, nil
-
}
// create a new CheckpointFallback object
@@ -170,27 +175,48 @@ func (ch CheckpointFallback) Build() (CheckpointFallback, error) {
return ch, fmt.Errorf("no services found for network %s", network)
}
- serviceList := serviceListRaw.([]interface{})
- for _, service := range serviceList {
- serviceMap := service.(map[interface{}]interface{})
- endpoint := serviceMap["endpoint"].(string)
- name := serviceMap["name"].(string)
- state := serviceMap["state"].(bool)
- verification := serviceMap["verification"].(bool)
- contacts := serviceMap["contacts"].(*yaml.MapSlice)
- notes := serviceMap["notes"].(*yaml.MapSlice)
- health := serviceMap["health"].(map[interface{}]interface{})
- healthResult := health["result"].(bool)
- healthDate := health["date"].(string)
+ serviceList, ok := serviceListRaw.([]interface{})
+ if !ok {
+ return ch, fmt.Errorf("expected a list for services in network %s", network)
+ }
+
+ for _, serviceRaw := range serviceList {
+ serviceMap, ok := serviceRaw.(map[interface{}]interface{})
+ if !ok {
+ return ch, fmt.Errorf("expected a map for service in network %s", network)
+ }
+
+ endpoint, _ := serviceMap["endpoint"].(string) // Handle potential nil
+ name, _ := serviceMap["name"].(string) // Handle potential nil
+ state, _ := serviceMap["state"].(bool) // Handle potential nil
+ verification, _ := serviceMap["verification"].(bool) // Handle potential nil
+
+ // Check contacts and notes
+ var contacts *yaml.MapSlice
+ if c, ok := serviceMap["contacts"].(*yaml.MapSlice); ok {
+ contacts = c
+ }
+
+ var notes *yaml.MapSlice
+ if n, ok := serviceMap["notes"].(*yaml.MapSlice); ok {
+ notes = n
+ }
+
+ healthRaw, ok := serviceMap["health"].(map[interface{}]interface{})
+ if !ok {
+ return ch, fmt.Errorf("expected a map for health in service %s", name)
+ }
+ healthResult, _ := healthRaw["result"].(bool) // Handle potential nil
+ healthDate, _ := healthRaw["date"].(string) // Handle potential nil
ch.Services[network] = append(ch.Services[network], CheckpointFallbackService{
- Endpoint: endpoint,
- Name: name,
- State: state,
- Verification: verification,
- Contacts: contacts,
- Notes: notes,
- Health_from_fallback: &Health{
+ Endpoint: endpoint,
+ Name: name,
+ State: state,
+ Verification: verification,
+ Contacts: contacts,
+ Notes: notes,
+ Health_from_fallback: &Health{
Result: healthResult,
Date: healthDate,
},
@@ -201,6 +227,7 @@ func (ch CheckpointFallback) Build() (CheckpointFallback, error) {
return ch, nil
}
+
// fetch the latest checkpoint from the given network
func (ch CheckpointFallback) FetchLatestCheckpoint(network config.Network) byte256 {
services := ch.GetHealthyFallbackServices(network)
@@ -231,95 +258,73 @@ func (ch CheckpointFallback) QueryService(endpoint string) (*RawSlotResponse, er
// fetch the latest checkpoint from the given services
func (ch CheckpointFallback) FetchLatestCheckpointFromServices(services []CheckpointFallbackService) (byte256, error) {
- var (
- slots []Slot
- wg sync.WaitGroup
- slotChan = make(chan Slot)
- errorsChan = make(chan error)
- )
-
- for _, service := range services {
- wg.Add(1)
- go func(service CheckpointFallbackService) {
- defer wg.Done()
- raw, err := ch.QueryService(service.Endpoint)
- if err != nil {
- errorsChan <- fmt.Errorf("failed to fetch checkpoint from service %s: %w", service.Endpoint, err)
- return
- }
-
- if len(raw.Data.Slots) > 0 {
- for _, slot := range raw.Data.Slots {
- if slot.Block_root != nil {
- slotChan <- slot
- return
- }
- }
- }
- }(service)
- }
-
- wg.Wait()
- close(slotChan)
- close(errorsChan)
-
- var allErrors error
- for err := range errorsChan {
- if allErrors == nil {
- allErrors = err
- } else {
- allErrors = fmt.Errorf("%v; %v", allErrors, err)
- }
- }
-
- if allErrors != nil {
- return byte256{}, allErrors
- }
-
- for slot := range slotChan {
- slots = append(slots, slot)
- }
-
- if len(slots) == 0 {
- return byte256{}, fmt.Errorf("failed to find max epoch from checkpoint slots")
- }
-
- maxEpochSlot := slots[0]
- for _, slot := range slots {
- if slot.Epoch > maxEpochSlot.Epoch {
- maxEpochSlot = slot
- }
- }
- maxEpoch := maxEpochSlot.Epoch
-
- var maxEpochSlots []Slot
- for _, slot := range slots {
- if slot.Epoch == maxEpoch {
- maxEpochSlots = append(maxEpochSlots, slot)
- }
- }
-
- checkpoints := make(map[byte256]int)
- for _, slot := range maxEpochSlots {
- if slot.Block_root != nil {
- checkpoints[*slot.Block_root]++
- }
- }
+ var (
+ slots []Slot
+ wg sync.WaitGroup
+ slotChan = make(chan Slot, len(services)) // Buffered channel
+ errorsChan = make(chan error, len(services))
+ )
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ for _, service := range services {
+ wg.Add(1)
+ go func(service CheckpointFallbackService) {
+ defer wg.Done()
+ raw, err := ch.QueryService(service.Endpoint)
+ if err != nil {
+ errorsChan <- fmt.Errorf("failed to fetch checkpoint from service %s: %w", service.Endpoint, err)
+ return
+ }
+ if len(raw.Data.Slots) > 0 {
+			slotChan <- raw.Data.Slots[0] // NOTE(review): sends the first slot even if Block_root is nil — validity is only checked later in processSlots
+ }
+ }(service)
+ }
+
+ go func() {
+ wg.Wait()
+ close(slotChan)
+ close(errorsChan)
+ }()
+
+ for {
+ select {
+ case slot, ok := <-slotChan:
+ if !ok {
+ // Channel closed, all slots processed
+ if len(slots) == 0 {
+ return byte256{}, fmt.Errorf("failed to find max epoch from checkpoint slots")
+ }
+ return processSlots(slots)
+ }
+ slots = append(slots, slot)
+ case err := <-errorsChan:
+ if err != nil {
+ log.Printf("Error fetching checkpoint: %v", err) // Log only if the error is not nil.
+ }
+ case <-ctx.Done():
+ if len(slots) == 0 {
+ return byte256{}, ctx.Err()
+ }
+ return processSlots(slots)
+ }
+ }
+}
- var mostCommon byte256
- maxCount := 0
- for blockRoot, count := range checkpoints {
- if count > maxCount {
- mostCommon = blockRoot
- maxCount = count
- }
- }
+func processSlots(slots []Slot) (byte256, error) {
+ maxEpochSlot := slots[0]
+ for _, slot := range slots {
+ if slot.Epoch > maxEpochSlot.Epoch {
+ maxEpochSlot = slot
+ }
+ }
- if maxCount == 0 {
- return byte256{}, fmt.Errorf("no checkpoint found")
- }
+ if maxEpochSlot.Block_root == nil {
+ return byte256{}, fmt.Errorf("no valid block root found")
+ }
- return mostCommon, nil
+ return *maxEpochSlot.Block_root, nil
}
func (ch CheckpointFallback) FetchLatestCheckpointFromApi(url string) (byte256, error) {
diff --git a/config/checkpoints/checkpoints_test.go b/config/checkpoints/checkpoints_test.go
new file mode 100644
index 0000000..4a119cf
--- /dev/null
+++ b/config/checkpoints/checkpoints_test.go
@@ -0,0 +1,320 @@
+package checkpoints
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "io"
+ "github.com/BlocSoc-iitr/selene/config"
+)
+
+type CustomTransport struct {
+ DoFunc func(req *http.Request) (*http.Response, error)
+}
+
+func (t *CustomTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.DoFunc(req)
+}
+
+func TestCheckpointFallbackNew(t *testing.T) {
+ cf := CheckpointFallback{}.New()
+
+ if len(cf.Services) != 0 {
+ t.Errorf("Expected empty Services map, got %v", cf.Services)
+ }
+
+ expectedNetworks := []config.Network{config.SEPOLIA, config.MAINNET, config.GOERLI}
+ if !equalNetworks(cf.Networks, expectedNetworks) {
+ t.Errorf("Expected Networks %v, got %v", expectedNetworks, cf.Networks)
+ }
+}
+
+func TestDeserializeSlot(t *testing.T) {
+ tests := []struct {
+ input []byte
+ expected uint64
+ hasError bool
+ }{
+ {[]byte(`"12345"`), 12345, false},
+ {[]byte(`12345`), 12345, false},
+ {[]byte(`"abc"`), 0, true},
+ {[]byte(`{}`), 0, true},
+ }
+
+ for _, test := range tests {
+ result, err := DeserializeSlot(test.input)
+ if test.hasError {
+ if err == nil {
+ t.Errorf("Expected error for input %s, got nil", string(test.input))
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error for input %s: %v", string(test.input), err)
+ }
+ if result == nil || *result != test.expected {
+ t.Errorf("Expected %d, got %v for input %s", test.expected, result, string(test.input))
+ }
+ }
+ }
+}
+
+func TestGetHealthyFallbackEndpoints(t *testing.T) {
+ cf := CheckpointFallback{
+ Services: map[config.Network][]CheckpointFallbackService{
+ config.MAINNET: {
+ {Endpoint: "http://healthy1.com", Health_from_fallback: &Health{Result: true}},
+ {Endpoint: "http://unhealthy.com", Health_from_fallback: &Health{Result: false}},
+ {Endpoint: "http://healthy2.com", Health_from_fallback: &Health{Result: true}},
+ },
+ },
+ }
+
+ healthyEndpoints := cf.GetHealthyFallbackEndpoints(config.MAINNET)
+ expected := []string{"http://healthy1.com", "http://healthy2.com"}
+
+ if !equalStringSlices(healthyEndpoints, expected) {
+ t.Errorf("Expected healthy endpoints %v, got %v", expected, healthyEndpoints)
+ }
+}
+
+func TestBuild(t *testing.T) {
+ yamlData := `
+sepolia:
+ - endpoint: "https://sepolia1.example.com"
+ name: "Sepolia 1"
+ state: true
+ verification: true
+ health:
+ result: true
+mainnet:
+ - endpoint: "https://mainnet1.example.com"
+ name: "Mainnet 1"
+ state: true
+ verification: true
+ health:
+ result: true
+goerli:
+ - endpoint: "https://goerli1.example.com"
+ name: "Goerli 1"
+ state: true
+ verification: true
+ health:
+ result: true
+`
+
+ client := &http.Client{
+ Transport: &CustomTransport{
+ DoFunc: func(req *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: 200,
+ Body: io.NopCloser(bytes.NewBufferString(yamlData)),
+ Header: make(http.Header),
+ }, nil
+ },
+ },
+ }
+
+ cf := CheckpointFallback{}.New()
+ originalClient := http.DefaultClient
+ http.DefaultClient = client
+ defer func() { http.DefaultClient = originalClient }()
+
+ builtCf, err := cf.Build()
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ if len(builtCf.Services[config.SEPOLIA]) != 1 {
+ t.Errorf("Expected 1 service for Sepolia, got %d", len(builtCf.Services[config.SEPOLIA]))
+ }
+
+ if len(builtCf.Services[config.MAINNET]) != 1 {
+ t.Errorf("Expected 1 service for Mainnet, got %d", len(builtCf.Services[config.MAINNET]))
+ }
+
+ sepoliaService := builtCf.Services[config.SEPOLIA][0]
+ if sepoliaService.Endpoint != "https://sepolia1.example.com" {
+ t.Errorf("Expected endpoint https://sepolia1.example.com, got %s", sepoliaService.Endpoint)
+ }
+}
+
+func TestFetchLatestCheckpointFromServices(t *testing.T) {
+ server1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if err := json.NewEncoder(w).Encode(RawSlotResponse{
+ Data: RawSlotResponseData{
+ Slots: []Slot{
+ {Epoch: 100, Block_root: &byte256{1}},
+ },
+ },
+ }); err != nil {
+ t.Fatalf("Failed to encode response: %v", err)
+ }
+ }))
+ defer server1.Close()
+
+ server2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if err := json.NewEncoder(w).Encode(RawSlotResponse{
+ Data: RawSlotResponseData{
+ Slots: []Slot{
+ {Epoch: 101, Block_root: &byte256{2}},
+ },
+ },
+ }); err != nil {
+ t.Fatalf("Failed to encode response: %v", err)
+ }
+ }))
+ defer server2.Close()
+
+ cf := CheckpointFallback{}
+ services := []CheckpointFallbackService{
+ {Endpoint: server1.URL},
+ {Endpoint: server2.URL},
+ }
+
+ checkpoint, err := cf.FetchLatestCheckpointFromServices(services)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ expected := byte256{2}
+ if checkpoint != expected {
+ t.Errorf("Expected checkpoint %v, got %v", expected, checkpoint)
+ }
+}
+
+func TestQueryService(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if err := json.NewEncoder(w).Encode(RawSlotResponse{
+ Data: RawSlotResponseData{
+ Slots: []Slot{
+ {Epoch: 100, Block_root: &byte256{1}},
+ },
+ },
+ }); err != nil {
+ t.Fatalf("Failed to encode response: %v", err)
+ }
+ }))
+ defer server.Close()
+
+ cf := CheckpointFallback{}
+ response, err := cf.QueryService(server.URL)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ if len(response.Data.Slots) != 1 {
+ t.Errorf("Expected 1 slot, got %d", len(response.Data.Slots))
+ }
+
+ if response.Data.Slots[0].Epoch != 100 {
+ t.Errorf("Expected epoch 100, got %d", response.Data.Slots[0].Epoch)
+ }
+}
+
+func TestFetchLatestCheckpointFromApi(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if err := json.NewEncoder(w).Encode(RawSlotResponse{
+ Data: RawSlotResponseData{
+ Slots: []Slot{
+ {Epoch: 100, Block_root: &byte256{1}},
+ },
+ },
+ }); err != nil {
+ t.Fatalf("Failed to encode response: %v", err)
+ }
+ }))
+ defer server.Close()
+
+ cf := CheckpointFallback{}
+ checkpoint, err := cf.FetchLatestCheckpointFromApi(server.URL)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ expected := byte256{1}
+ if checkpoint != expected {
+ t.Errorf("Expected checkpoint %v, got %v", expected, checkpoint)
+ }
+}
+
+func TestConstructUrl(t *testing.T) {
+ cf := CheckpointFallback{}
+ endpoint := "https://example.com"
+ expected := "https://example.com/checkpointz/v1/beacon/slots"
+ result := cf.ConstructUrl(endpoint)
+ if result != expected {
+ t.Errorf("Expected URL %s, got %s", expected, result)
+ }
+}
+
+func TestGetAllFallbackEndpoints(t *testing.T) {
+ cf := CheckpointFallback{
+ Services: map[config.Network][]CheckpointFallbackService{
+ config.MAINNET: {
+ {Endpoint: "http://endpoint1.com"},
+ {Endpoint: "http://endpoint2.com"},
+ },
+ },
+ }
+
+ endpoints := cf.GetAllFallbackEndpoints(config.MAINNET)
+ expected := []string{"http://endpoint1.com", "http://endpoint2.com"}
+
+ if !equalStringSlices(endpoints, expected) {
+ t.Errorf("Expected endpoints %v, got %v", expected, endpoints)
+ }
+}
+func TestGetHealthyFallbackServices(t *testing.T) {
+ cf := CheckpointFallback{
+ Services: map[config.Network][]CheckpointFallbackService{
+ config.MAINNET: {
+ {Endpoint: "http://healthy1.com", Health_from_fallback: &Health{Result: true}},
+ {Endpoint: "http://unhealthy.com", Health_from_fallback: &Health{Result: false}},
+ {Endpoint: "http://healthy2.com", Health_from_fallback: &Health{Result: true}},
+ },
+ },
+ }
+
+ healthyServices := cf.GetHealthyFallbackServices(config.MAINNET)
+ if len(healthyServices) != 2 {
+ t.Errorf("Expected 2 healthy services, got %d", len(healthyServices))
+ }
+
+ for _, service := range healthyServices {
+ if !service.Health_from_fallback.Result {
+ t.Errorf("Expected healthy service, got unhealthy for endpoint %s", service.Endpoint)
+ }
+ }
+}
+
+
+func equalNetworks(a, b []config.Network) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ networkMap := make(map[config.Network]bool)
+ for _, n := range a {
+ networkMap[n] = true
+ }
+ for _, n := range b {
+ if !networkMap[n] {
+ return false
+ }
+ }
+ return true
+}
+
+func equalStringSlices(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
diff --git a/config/config.go b/config/config.go
index bc184ce..9cb8052 100644
--- a/config/config.go
+++ b/config/config.go
@@ -2,24 +2,27 @@ package config
import (
"fmt"
+
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+ "github.com/BlocSoc-iitr/selene/utils"
"github.com/spf13/viper"
- "github.com/BlocSoc-iitr/selene/common"
)
+
type Config struct {
- ConsensusRpc string `json:"consensus_rpc"`
- ExecutionRpc string `json:"execution_rpc"`
- RpcBindIp *string `json:"rpc_bind_ip"`
- RpcPort *uint16 `json:"rpc_port"`
- DefaultCheckpoint [32]byte `json:"default_checkpoint"` // In cli.go, checkpoint is currently taken as []byte{}
- Checkpoint *[32]byte `json:"checkpoint"` // but it should be of 32 bytes or [32]byte{}
- DataDir *string `json:"data_dir"`
- Chain ChainConfig `json:"chain"`
- Forks Forks `json:"forks"`
- MaxCheckpointAge uint64 `json:"max_checkpoint_age"`
- Fallback *string `json:"fallback"`
- LoadExternalFallback bool `json:"load_external_fallback"`
- StrictCheckpointAge bool `json:"strict_checkpoint_age"`
- DatabaseType *string `json:"database_type"`
+ ConsensusRpc string `json:"consensus_rpc"`
+ ExecutionRpc string `json:"execution_rpc"`
+ RpcBindIp *string `json:"rpc_bind_ip"`
+ RpcPort *uint16 `json:"rpc_port"`
+ DefaultCheckpoint [32]byte `json:"default_checkpoint"` // In cli.go, checkpoint is currently taken as []byte{}
+ Checkpoint *[32]byte `json:"checkpoint"` // but it should be of 32 bytes or [32]byte{}
+ DataDir *string `json:"data_dir"`
+ Chain ChainConfig `json:"chain"`
+ Forks consensus_core.Forks `json:"forks"`
+ MaxCheckpointAge uint64 `json:"max_checkpoint_age"`
+ Fallback *string `json:"fallback"`
+ LoadExternalFallback bool `json:"load_external_fallback"`
+ StrictCheckpointAge bool `json:"strict_checkpoint_age"`
+ DatabaseType *string `json:"database_type"`
}
// only if we are using CLI
@@ -76,7 +79,7 @@ func (c Config) from_file(configPath *string, network *string, cliConfig *CliCon
finalConfig.Checkpoint = (*[32]byte)(*cliConfig.Checkpoint)
} else if tomlProvider["checkpoint"] != nil && tomlProvider["checkpoint"].(string) != "" {
checkpoint, _ := tomlProvider["checkpoint"].(string)
- checkpointBytes, err := common.Hex_str_to_bytes(checkpoint)
+ checkpointBytes, err := utils.Hex_str_to_bytes(checkpoint)
if err != nil {
fmt.Printf("Failed to convert checkpoint value to byte slice, %v", err)
}
@@ -138,4 +141,4 @@ func (c Config) to_base_config() BaseConfig {
LoadExternalFallback: c.LoadExternalFallback,
StrictCheckpointAge: c.StrictCheckpointAge,
}
-}
\ No newline at end of file
+}
diff --git a/config/config_test.go b/config/config_test.go
index 0bc3a1d..c34095c 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -3,10 +3,12 @@ package config
import (
"encoding/hex"
"fmt"
- "github.com/spf13/viper"
"os"
"reflect"
"testing"
+
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+ "github.com/spf13/viper"
)
var (
@@ -23,9 +25,9 @@ var (
defaultCheckpoint = [32]byte{}
)
-/////////////////////////////
-///// from_file() tests /////
-/////////////////////////////
+// ///////////////////////////
+// /// from_file() tests /////
+// ///////////////////////////
func TestMainnetBaseConfig(t *testing.T) {
network := "MAINNET"
path := "./config.toml"
@@ -179,9 +181,9 @@ func createConfigFile(v *viper.Viper) {
}
}
-//////////////////////////////////
-///// to_base_config() tests /////
-//////////////////////////////////
+// ////////////////////////////////
+// /// to_base_config() tests /////
+// ////////////////////////////////
func TestReturnsCorrectBaseConfig(t *testing.T) {
config := Config{
ConsensusRpc: consensusRpc,
@@ -189,7 +191,7 @@ func TestReturnsCorrectBaseConfig(t *testing.T) {
RpcPort: &rpcPort,
DefaultCheckpoint: defaultCheckpoint,
Chain: ChainConfig{},
- Forks: Forks{},
+ Forks: consensus_core.Forks{},
MaxCheckpointAge: uint64(maxCheckpointAge),
DataDir: &dataDirectory,
LoadExternalFallback: loadExternalFallback,
@@ -213,7 +215,7 @@ func TestReturnsCorrectDefaultValues(t *testing.T) {
ConsensusRpc: consensusRpc,
DefaultCheckpoint: defaultCheckpoint,
Chain: ChainConfig{},
- Forks: Forks{},
+ Forks: consensus_core.Forks{},
MaxCheckpointAge: uint64(maxCheckpointAge),
DataDir: &dataDirectory,
LoadExternalFallback: loadExternalFallback,
@@ -230,4 +232,4 @@ func TestReturnsCorrectDefaultValues(t *testing.T) {
if baseConfig.RpcBindIp != "127.0.0.1" {
t.Errorf("Expected Max Checkpoint age to be %v, got %v", "127.0.0.1", baseConfig.RpcBindIp)
}
-}
\ No newline at end of file
+}
diff --git a/config/networks.go b/config/networks.go
index a98e176..0f527a3 100644
--- a/config/networks.go
+++ b/config/networks.go
@@ -1,18 +1,24 @@
package config
+
import (
"fmt"
- "github.com/BlocSoc-iitr/selene/common"
- "github.com/pkg/errors"
"os"
"path/filepath"
"strings"
+
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+ "github.com/BlocSoc-iitr/selene/utils"
+ "github.com/pkg/errors"
)
+
type Network string
+
const (
MAINNET Network = "MAINNET"
GOERLI Network = "GOERLI"
SEPOLIA Network = "SEPOLIA"
)
+
func (n Network) BaseConfig(s string) (BaseConfig, error) {
switch strings.ToUpper(s) {
case "MAINNET":
@@ -52,7 +58,7 @@ func (n Network) ChainID(id uint64) (BaseConfig, error) {
}
return config, nil
case 11155111:
-
+
config, err := Sepolia()
if err != nil {
return BaseConfig{}, err
@@ -63,19 +69,19 @@ func (n Network) ChainID(id uint64) (BaseConfig, error) {
}
}
func dataDir(network Network) (string, error) {
- homeDir, err := os.UserHomeDir()
- if err != nil {
- return "", fmt.Errorf("failed to get user home directory: %w", err)
- }
- path := filepath.Join(homeDir, fmt.Sprintf("selene/data/%s", strings.ToLower(string(network))))
- return path, nil
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return "", fmt.Errorf("failed to get user home directory: %w", err)
+ }
+ path := filepath.Join(homeDir, fmt.Sprintf("selene/data/%s", strings.ToLower(string(network))))
+ return path, nil
}
func Mainnet() (BaseConfig, error) {
- defaultCheckpoint, err := common.Hex_str_to_bytes("c7fc7b2f4b548bfc9305fa80bc1865ddc6eea4557f0a80507af5dc34db7bd9ce")
+ defaultCheckpoint, err := utils.Hex_str_to_bytes("c7fc7b2f4b548bfc9305fa80bc1865ddc6eea4557f0a80507af5dc34db7bd9ce")
if err != nil {
return BaseConfig{}, fmt.Errorf("failed to parse default checkpoint: %w", err)
}
- genesisRoot, err := common.Hex_str_to_bytes("4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95")
+ genesisRoot, err := utils.Hex_str_to_bytes("4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95")
if err != nil {
return BaseConfig{}, fmt.Errorf("failed to parse genesis root: %w", err)
}
@@ -93,33 +99,33 @@ func Mainnet() (BaseConfig, error) {
GenesisTime: 1606824023,
GenesisRoot: genesisRoot,
},
- Forks: Forks{
- Genesis: Fork{
+ Forks: consensus_core.Forks{
+ Genesis: consensus_core.Fork{
Epoch: 0,
ForkVersion: []byte{0x00, 0x00, 0x00, 0x00}},
- Altair: Fork{
+ Altair: consensus_core.Fork{
Epoch: 74240,
ForkVersion: []byte{0x01, 0x00, 0x00, 0x00}},
- Bellatrix: Fork{
+ Bellatrix: consensus_core.Fork{
Epoch: 144896,
ForkVersion: []byte{0x02, 0x00, 0x00, 0x00}},
- Capella: Fork{
+ Capella: consensus_core.Fork{
Epoch: 194048,
ForkVersion: []byte{0x03, 0x00, 0x00, 0x00}},
- Deneb: Fork{
+ Deneb: consensus_core.Fork{
Epoch: 269568,
ForkVersion: []byte{0x04, 0x00, 0x00, 0x00}},
},
MaxCheckpointAge: 1_209_600, // 14 days
- DataDir: &dataDir,
+ DataDir: &dataDir,
}, nil
}
func Goerli() (BaseConfig, error) {
- defaultCheckpoint, err := common.Hex_str_to_bytes("f6e9d5fdd7c406834e888961beab07b2443b64703c36acc1274ae1ce8bb48839")
+ defaultCheckpoint, err := utils.Hex_str_to_bytes("f6e9d5fdd7c406834e888961beab07b2443b64703c36acc1274ae1ce8bb48839")
if err != nil {
return BaseConfig{}, fmt.Errorf("failed to parse default checkpoint: %w", err)
}
- genesisRoot, err := common.Hex_str_to_bytes("043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb")
+ genesisRoot, err := utils.Hex_str_to_bytes("043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb")
if err != nil {
return BaseConfig{}, fmt.Errorf("failed to parse genesis root: %w", err)
}
@@ -136,20 +142,20 @@ func Goerli() (BaseConfig, error) {
GenesisTime: 1616508000,
GenesisRoot: genesisRoot,
},
- Forks: Forks{
- Genesis: Fork{
+ Forks: consensus_core.Forks{
+ Genesis: consensus_core.Fork{
Epoch: 0,
ForkVersion: []byte{0x00, 0x10, 0x20, 0x00}},
- Altair: Fork{
+ Altair: consensus_core.Fork{
Epoch: 36660,
ForkVersion: []byte{0x01, 0x10, 0x20, 0x00}},
- Bellatrix: Fork{
+ Bellatrix: consensus_core.Fork{
Epoch: 112260,
ForkVersion: []byte{0x02, 0x10, 0x20, 0x00}},
- Capella: Fork{
+ Capella: consensus_core.Fork{
Epoch: 162304,
ForkVersion: []byte{0x03, 0x10, 0x20, 0x00}},
- Deneb: Fork{
+ Deneb: consensus_core.Fork{
Epoch: 231680,
ForkVersion: []byte{0x04, 0x10, 0x20, 0x00}},
},
@@ -158,11 +164,11 @@ func Goerli() (BaseConfig, error) {
}, nil
}
func Sepolia() (BaseConfig, error) {
- defaultCheckpoint, err := common.Hex_str_to_bytes("4135bf01bddcfadac11143ba911f1c7f0772fdd6b87742b0bc229887bbf62b48")
+ defaultCheckpoint, err := utils.Hex_str_to_bytes("4135bf01bddcfadac11143ba911f1c7f0772fdd6b87742b0bc229887bbf62b48")
if err != nil {
return BaseConfig{}, fmt.Errorf("failed to parse default checkpoint: %w", err)
}
- genesisRoot, err := common.Hex_str_to_bytes("d8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078")
+ genesisRoot, err := utils.Hex_str_to_bytes("d8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078")
if err != nil {
return BaseConfig{}, fmt.Errorf("failed to parse genesis root: %w", err)
}
@@ -179,24 +185,24 @@ func Sepolia() (BaseConfig, error) {
GenesisTime: 1655733600,
GenesisRoot: genesisRoot,
},
- Forks: Forks{
- Genesis: Fork{
+ Forks: consensus_core.Forks{
+ Genesis: consensus_core.Fork{
Epoch: 0,
ForkVersion: []byte{0x90, 0x00, 0x00, 0x69}},
- Altair: Fork{
+ Altair: consensus_core.Fork{
Epoch: 50,
ForkVersion: []byte{0x90, 0x00, 0x00, 0x70}},
- Bellatrix: Fork{
+ Bellatrix: consensus_core.Fork{
Epoch: 100,
ForkVersion: []byte{0x90, 0x00, 0x00, 0x71}},
- Capella: Fork{
+ Capella: consensus_core.Fork{
Epoch: 56832,
ForkVersion: []byte{0x90, 0x00, 0x00, 0x72}},
- Deneb: Fork{
+ Deneb: consensus_core.Fork{
Epoch: 132608,
ForkVersion: []byte{0x90, 0x00, 0x00, 0x73}},
},
MaxCheckpointAge: 1_209_600, // 14 days
DataDir: &dataDir,
}, nil
-}
\ No newline at end of file
+}
diff --git a/config/networks_test.go b/config/networks_test.go
new file mode 100644
index 0000000..88fccaf
--- /dev/null
+++ b/config/networks_test.go
@@ -0,0 +1,195 @@
+package config
+import (
+ "testing"
+ "github.com/stretchr/testify/assert"
+ "strings"
+)
+func TestNetwork_BaseConfig(t *testing.T) {
+ tests := []struct {
+ name string
+ inputNetwork string
+ expectedChainID uint64
+ expectedGenesis uint64
+ expectedRPCPort uint16
+ checkConsensusRPC func(*testing.T, *string)
+ wantErr bool
+ }{
+ {
+ name: "Mainnet",
+ inputNetwork: "MAINNET",
+ expectedChainID: 1,
+ expectedGenesis: 1606824023,
+ expectedRPCPort: 8545,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {
+ assert.NotNil(t, rpc)
+ assert.Equal(t, "https://www.lightclientdata.org", *rpc)
+ },
+ wantErr: false,
+ },
+ {
+ name: "Goerli",
+ inputNetwork: "GOERLI",
+ expectedChainID: 5,
+ expectedGenesis: 1616508000,
+ expectedRPCPort: 8545,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {
+ assert.Nil(t, rpc)
+ },
+ wantErr: false,
+ },
+ {
+ name: "Sepolia",
+ inputNetwork: "SEPOLIA",
+ expectedChainID: 11155111,
+ expectedGenesis: 1655733600,
+ expectedRPCPort: 8545,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {
+ assert.Nil(t, rpc)
+ },
+ wantErr: false,
+ },
+ {
+ name: "Invalid",
+ inputNetwork: "INVALID",
+ expectedChainID: 0,
+ expectedGenesis: 0,
+ expectedRPCPort: 0,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ n := Network("") // The receiver doesn't matter, we're testing the input
+ config, err := n.BaseConfig(tt.inputNetwork)
+ if tt.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expectedChainID, config.Chain.ChainID)
+ assert.Equal(t, tt.expectedGenesis, config.Chain.GenesisTime)
+ assert.Equal(t, tt.expectedRPCPort, config.RpcPort)
+ tt.checkConsensusRPC(t, config.ConsensusRpc)
+
+ // Check Forks
+ assert.NotEmpty(t, config.Forks.Genesis)
+ assert.NotEmpty(t, config.Forks.Altair)
+ assert.NotEmpty(t, config.Forks.Bellatrix)
+ assert.NotEmpty(t, config.Forks.Capella)
+ assert.NotEmpty(t, config.Forks.Deneb)
+
+ // Check MaxCheckpointAge
+ assert.Equal(t, uint64(1_209_600), config.MaxCheckpointAge)
+
+ // Check DataDir
+ assert.NotNil(t, config.DataDir)
+ assert.Contains(t, strings.ToLower(*config.DataDir), strings.ToLower(tt.inputNetwork))
+ }
+ })
+ }
+}
+func TestNetwork_ChainID(t *testing.T) {
+ tests := []struct {
+ name string
+ inputChainID uint64
+ expectedChainID uint64
+ expectedGenesis uint64
+ expectedRPCPort uint16
+ checkConsensusRPC func(*testing.T, *string)
+ wantErr bool
+ }{
+ {
+ name: "Mainnet",
+ inputChainID: 1,
+ expectedChainID: 1,
+ expectedGenesis: 1606824023,
+ expectedRPCPort: 8545,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {
+ assert.NotNil(t, rpc)
+ assert.Equal(t, "https://www.lightclientdata.org", *rpc)
+ },
+ wantErr: false,
+ },
+ {
+ name: "Goerli",
+ inputChainID: 5,
+ expectedChainID: 5,
+ expectedGenesis: 1616508000,
+ expectedRPCPort: 8545,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {
+ assert.Nil(t, rpc)
+ },
+ wantErr: false,
+ },
+ {
+ name: "Sepolia",
+ inputChainID: 11155111,
+ expectedChainID: 11155111,
+ expectedGenesis: 1655733600,
+ expectedRPCPort: 8545,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {
+ assert.Nil(t, rpc)
+ },
+ wantErr: false,
+ },
+ {
+ name: "Invalid ChainID",
+ inputChainID: 9999, // Non-existent ChainID
+ expectedChainID: 0,
+ expectedGenesis: 0,
+ expectedRPCPort: 0,
+ checkConsensusRPC: func(t *testing.T, rpc *string) {},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ n := Network("") // The receiver doesn't matter, we're testing the input
+ config, err := n.ChainID(tt.inputChainID)
+ if tt.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expectedChainID, config.Chain.ChainID)
+ assert.Equal(t, tt.expectedGenesis, config.Chain.GenesisTime)
+ assert.Equal(t, tt.expectedRPCPort, config.RpcPort)
+ tt.checkConsensusRPC(t, config.ConsensusRpc)
+
+ // Check Forks
+ assert.NotEmpty(t, config.Forks.Genesis)
+ assert.NotEmpty(t, config.Forks.Altair)
+ assert.NotEmpty(t, config.Forks.Bellatrix)
+ assert.NotEmpty(t, config.Forks.Capella)
+ assert.NotEmpty(t, config.Forks.Deneb)
+
+ // Check MaxCheckpointAge
+ assert.Equal(t, uint64(1_209_600), config.MaxCheckpointAge)
+
+ // Check DataDir
+ assert.NotNil(t, config.DataDir)
+ assert.Contains(t, strings.ToLower(*config.DataDir), strings.ToLower(tt.name))
+ }
+ })
+ }
+}
+func TestDataDir(t *testing.T) {
+ tests := []struct {
+ name string
+ network Network
+ expected string
+ }{
+ {"Mainnet DataDir", MAINNET, "selene/data/mainnet"},
+ {"Goerli DataDir", GOERLI, "selene/data/goerli"},
+ {"Sepolia DataDir", SEPOLIA, "selene/data/sepolia"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ path, err := dataDir(tt.network)
+ assert.NoError(t, err)
+ assert.Contains(t, path, tt.expected)
+ })
+ }
+}
diff --git a/config/types.go b/config/types.go
index aa8d5b7..ec76902 100644
--- a/config/types.go
+++ b/config/types.go
@@ -1,24 +1,16 @@
package config
+
import (
"encoding/hex"
"encoding/json"
)
+
type ChainConfig struct {
ChainID uint64 `json:"chain_id"`
GenesisTime uint64 `json:"genesis_time"`
GenesisRoot []byte `json:"genesis_root"`
}
-type Fork struct {
- Epoch uint64 `json:"epoch"`
- ForkVersion []byte `json:"fork_version"`
-}
-type Forks struct {
- Genesis Fork `json:"genesis"`
- Altair Fork `json:"altair"`
- Bellatrix Fork `json:"bellatrix"`
- Capella Fork `json:"capella"`
- Deneb Fork `json:"deneb"`
-}
+
func (c ChainConfig) MarshalJSON() ([]byte, error) {
type Alias ChainConfig
return json.Marshal(&struct {
@@ -44,28 +36,3 @@ func (c *ChainConfig) UnmarshalJSON(data []byte) error {
c.GenesisRoot, err = hex.DecodeString(aux.GenesisRoot)
return err
}
-func (f Fork) MarshalJSON() ([]byte, error) {
- type Alias Fork
- return json.Marshal(&struct {
- Alias
- ForkVersion string `json:"fork_version"`
- }{
- Alias: (Alias)(f),
- ForkVersion: hex.EncodeToString(f.ForkVersion),
- })
-}
-func (f *Fork) UnmarshalJSON(data []byte) error {
- type Alias Fork
- aux := &struct {
- *Alias
- ForkVersion string `json:"fork_version"`
- }{
- Alias: (*Alias)(f),
- }
- if err := json.Unmarshal(data, &aux); err != nil {
- return err
- }
- var err error
- f.ForkVersion, err = hex.DecodeString(aux.ForkVersion)
- return err
-}
diff --git a/config/types_test.go b/config/types_test.go
new file mode 100644
index 0000000..2c47894
--- /dev/null
+++ b/config/types_test.go
@@ -0,0 +1,80 @@
+package config
+
+import (
+ "encoding/json"
+ "testing"
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+)
+
+func TestChainConfigMarshalUnmarshal(t *testing.T) {
+ originalConfig := ChainConfig{
+ ChainID: 1,
+ GenesisTime: 1606824023,
+ GenesisRoot: []byte{0x4b, 0x36, 0x3d, 0xb9},
+ }
+
+ // Marshal ChainConfig to JSON
+ marshaledData, err := json.Marshal(originalConfig)
+ if err != nil {
+ t.Fatalf("Error marshaling ChainConfig: %v", err)
+ }
+
+ // Unmarshal the JSON back to ChainConfig
+ var unmarshaledConfig ChainConfig
+ err = json.Unmarshal(marshaledData, &unmarshaledConfig)
+ if err != nil {
+ t.Fatalf("Error unmarshaling ChainConfig: %v", err)
+ }
+
+ // Verify that the original and unmarshaled configs are the same
+ if originalConfig.ChainID != unmarshaledConfig.ChainID {
+ t.Errorf("ChainID mismatch. Got %d, expected %d", unmarshaledConfig.ChainID, originalConfig.ChainID)
+ }
+ if originalConfig.GenesisTime != unmarshaledConfig.GenesisTime {
+ t.Errorf("GenesisTime mismatch. Got %d, expected %d", unmarshaledConfig.GenesisTime, originalConfig.GenesisTime)
+ }
+ if string(originalConfig.GenesisRoot) != string(unmarshaledConfig.GenesisRoot) {
+ t.Errorf("GenesisRoot mismatch. Got %x, expected %x", unmarshaledConfig.GenesisRoot, originalConfig.GenesisRoot)
+ }
+}
+
+func TestForkMarshalUnmarshal(t *testing.T) {
+ originalFork := consensus_core.Fork{
+ Epoch: 0,
+ ForkVersion: []byte{0x01, 0x00, 0x00, 0x00},
+ }
+
+ // Marshal Fork to JSON
+ marshaledData, err := json.Marshal(originalFork)
+ if err != nil {
+ t.Fatalf("Error marshaling Fork: %v", err)
+ }
+
+ // Unmarshal the JSON back to Fork
+ var unmarshaledFork consensus_core.Fork
+ err = json.Unmarshal(marshaledData, &unmarshaledFork)
+ if err != nil {
+ t.Fatalf("Error unmarshaling Fork: %v", err)
+ }
+
+ // Verify that the original and unmarshaled Fork are the same
+ if originalFork.Epoch != unmarshaledFork.Epoch {
+ t.Errorf("Epoch mismatch. Got %d, expected %d", unmarshaledFork.Epoch, originalFork.Epoch)
+ }
+ if string(originalFork.ForkVersion) != string(unmarshaledFork.ForkVersion) {
+ t.Errorf("ForkVersion mismatch. Got %x, expected %x", unmarshaledFork.ForkVersion, originalFork.ForkVersion)
+ }
+}
+
+func TestUnmarshalInvalidHex(t *testing.T) {
+ invalidJSON := `{
+ "epoch": 0,
+ "fork_version": "invalid_hex_string"
+ }`
+
+ var fork consensus_core.Fork
+ err := json.Unmarshal([]byte(invalidJSON), &fork)
+ if err == nil {
+ t.Fatal("Expected error unmarshaling invalid hex string, but got nil")
+ }
+}
diff --git a/config/utils.go b/config/utils.go
deleted file mode 100644
index 3004d37..0000000
--- a/config/utils.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package config
-
-import (
- "encoding/hex"
- "encoding/json"
-
- "github.com/BlocSoc-iitr/selene/common"
-)
-
-func BytesSerialise(bytes []byte) ([]byte, error) {
-
- if bytes == nil {
- return json.Marshal(nil)
- }
- bytesString := hex.EncodeToString(bytes)
- result, err := json.Marshal(bytesString)
- if err != nil {
- return nil, err
- }
- return result, nil
-
-}
-
-func BytesDeserialise(data []byte) ([]byte, error) {
- var bytesOpt *string
- if err := json.Unmarshal(data, &bytesOpt); err != nil {
- return nil, err
- }
-
- if bytesOpt == nil {
- return nil, nil
- } else {
- bytes, err := common.Hex_str_to_bytes(*bytesOpt)
- if err != nil {
- return nil, err
- }
- return bytes, nil
- }
-
-}
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 1fd67dd..2a25cf3 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -4,48 +4,1061 @@ package consensus
// uses rpc
// uses config for networks
// uses common for datatypes
-type ConsensusClient struct{}
-type Inner struct{}
-type LightClientStore struct{}
-
-func(con ConsensusClient) new(){}
-func(con ConsensusClient) shutdown(){}
-func(con ConsensusClient) expected_current_slot(){}
-
-func sync_fallback(){}
-func sync_all_fallback(){}
-
-
-func(in Inner) new(){}
-func(in Inner) get_rpc(){}
-func(in Inner) check_execution_payload(){}
-func(in Inner) get_payloads(){}
-func(in Inner) advance(){}
-func(in Inner) send_blocks(){}
-func(in Inner) duration_until_next_update(){}
-func(in Inner) bootstrap(){}
-func(in Inner) verify_generic_update(){}
-func(in Inner) verify_update(){}
-func(in Inner) verify_finality_update(){}
-func(in Inner) verify_optimistic_update(){}
-func(in Inner) apply_generic_update(){}
-func(in Inner) apply_update(){}
-func(in Inner) apply_finality_update(){}
-func(in Inner) apply_optimistic_update(){}
-func(in Inner) log_finality_update(){}
-func(in Inner) log_optimistic_update(){}
-func(in Inner) has_finality_update(){}
-func(in Inner) has_sync_update(){}
-func(in Inner) safety_threshold(){}
-func(in Inner) verify_sync_committee_signature(){}
-func(in Inner) compute_committee_sign_root(){}
-func(in Inner) age(){}
-func(in Inner) expected_current_slot(){}
-func(in Inner) is_valid_checkpoint(){}
-
-
-
-func get_participating_keys(){}
-func is_finality_proof_valid(){}
-func is_current_committee_proof_valid(){}
-func is_next_committee_proof_valid(){}
\ No newline at end of file
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "fmt"
+ "log"
+ "math/big"
+
+ "os"
+ "sync"
+ "time"
+
+ "github.com/BlocSoc-iitr/selene/common"
+ "github.com/BlocSoc-iitr/selene/config"
+ "github.com/BlocSoc-iitr/selene/config/checkpoints"
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+ "github.com/BlocSoc-iitr/selene/consensus/rpc"
+ "github.com/BlocSoc-iitr/selene/utils"
+ "github.com/BlocSoc-iitr/selene/utils/bls"
+ geth "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/holiman/uint256"
+ "github.com/pkg/errors"
+)
+
+// Error definitions
+
+var (
+ ErrIncorrectRpcNetwork = errors.New("incorrect RPC network")
+ ErrPayloadNotFound = errors.New("payload not found")
+ ErrInvalidHeaderHash = errors.New("invalid header hash")
+ ErrCheckpointTooOld = errors.New("checkpoint too old")
+ ErrBootstrapFetchFailed = errors.New("could not fetch bootstrap")
+ ErrInvalidUpdate = errors.New("invalid update")
+ ErrInsufficientParticipation = errors.New("insufficient participation")
+ ErrInvalidTimestamp = errors.New("invalid timestamp")
+ ErrInvalidPeriod = errors.New("invalid period")
+ ErrNotRelevant = errors.New("update not relevant")
+ ErrInvalidFinalityProof = errors.New("invalid finality proof")
+ ErrInvalidNextSyncCommitteeProof = errors.New("invalid next sync committee proof")
+ ErrInvalidSignature = errors.New("invalid signature")
+)
+
+const MAX_REQUEST_LIGHT_CLIENT_UPDATES = 128
+
+type GenericUpdate struct {
+ AttestedHeader consensus_core.Header
+ SyncAggregate consensus_core.SyncAggregate
+ SignatureSlot uint64
+ NextSyncCommittee *consensus_core.SyncCommittee
+ NextSyncCommitteeBranch *[]consensus_core.Bytes32
+ FinalizedHeader consensus_core.Header
+ FinalityBranch []consensus_core.Bytes32
+}
+
+type ConsensusClient struct {
+ BlockRecv *common.Block
+ FinalizedBlockRecv *common.Block
+ CheckpointRecv *[]byte
+ genesisTime uint64
+ db Database
+}
+
+type Inner struct {
+ RPC rpc.ConsensusRpc
+ Store LightClientStore
+ lastCheckpoint *[]byte
+ blockSend chan common.Block
+ finalizedBlockSend chan *common.Block
+ checkpointSend chan *[]byte
+ Config *config.Config
+}
+type LightClientStore struct {
+ FinalizedHeader consensus_core.Header
+ CurrentSyncCommitee consensus_core.SyncCommittee
+ NextSyncCommitee *consensus_core.SyncCommittee
+ OptimisticHeader consensus_core.Header
+ PreviousMaxActiveParticipants uint64
+ CurrentMaxActiveParticipants uint64
+}
+
+func (con ConsensusClient) New(rpc *string, config config.Config) ConsensusClient {
+ blockSend := make(chan common.Block, 256)
+ finalizedBlockSend := make(chan *common.Block)
+ checkpointSend := make(chan *[]byte)
+
+ db, err := con.db.New(&config)
+ if err != nil {
+ panic(err)
+ }
+
+ var initialCheckpoint [32]byte
+
+ if config.Checkpoint != nil {
+ initialNewCheckpoint, errorWhileLoadingCheckpoint := db.LoadCheckpoint()
+ copy(initialCheckpoint[:], initialNewCheckpoint)
+ if errorWhileLoadingCheckpoint != nil {
+ log.Printf("error while loading checkpoint: %v", errorWhileLoadingCheckpoint)
+ }
+ }
+ if initialCheckpoint == [32]byte{} {
+ panic("No checkpoint found")
+ }
+ In := &Inner{}
+ inner := In.New(*rpc, blockSend, finalizedBlockSend, checkpointSend, &config)
+
+ go func() {
+ err := inner.sync(initialCheckpoint)
+ if err != nil {
+ if inner.Config.LoadExternalFallback {
+ err = sync_all_fallback(inner, inner.Config.Chain.ChainID)
+ if err != nil {
+ log.Printf("sync failed: %v", err)
+ os.Exit(1)
+ }
+ } else if inner.Config.Fallback != nil {
+ err = sync_fallback(inner, inner.Config.Fallback)
+ if err != nil {
+ log.Printf("sync failed: %v", err)
+ os.Exit(1)
+ }
+ } else {
+ log.Printf("sync failed: %v", err)
+ os.Exit(1)
+ }
+ }
+
+ _ = inner.send_blocks()
+
+ for {
+ time.Sleep(inner.duration_until_next_update())
+
+ err := inner.advance()
+ if err != nil {
+ log.Printf("advance error: %v", err)
+ continue
+ }
+
+ err = inner.send_blocks()
+ if err != nil {
+ log.Printf("send error: %v", err)
+ continue
+ }
+ }
+ }()
+
+ blocksReceived := <-blockSend
+ finalizedBlocksReceived := <-finalizedBlockSend
+ checkpointsReceived := <-checkpointSend
+
+ return ConsensusClient{
+ BlockRecv: &blocksReceived,
+ FinalizedBlockRecv: finalizedBlocksReceived,
+ CheckpointRecv: checkpointsReceived,
+ genesisTime: config.Chain.GenesisTime,
+ db: db,
+ }
+
+}
+func (con ConsensusClient) Shutdown() error {
+ checkpoint := con.CheckpointRecv
+ if checkpoint != nil {
+ err := con.db.SaveCheckpoint(*checkpoint)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func (con ConsensusClient) Expected_current_slot() uint64 {
+ now := time.Now().Unix()
+ // Assuming SLOT_DURATION is the duration of each slot in seconds
+ const SLOT_DURATION uint64 = 12
+ return (uint64(now) - con.genesisTime) / SLOT_DURATION
+}
+
+func sync_fallback(inner *Inner, fallback *string) error {
+ cf, err := (&checkpoints.CheckpointFallback{}).FetchLatestCheckpointFromApi(*fallback)
+ if err != nil {
+ return errors.Wrap(err, "failed to fetch checkpoint from API")
+ }
+ return inner.sync(cf)
+
+}
+func sync_all_fallback(inner *Inner, chainID uint64) error {
+ var n config.Network
+ network, err := n.ChainID(chainID)
+ if err != nil {
+ return err
+ }
+
+ ch := checkpoints.CheckpointFallback{}
+
+ checkpointFallback, errWhileCheckpoint := ch.Build()
+ if errWhileCheckpoint != nil {
+ return err
+ }
+
+ chainId := network.Chain.ChainID
+ var networkName config.Network
+ if chainId == 1 {
+ networkName = config.MAINNET
+ } else if chainId == 5 {
+ networkName = config.GOERLI
+ } else if chainId == 11155111 {
+ networkName = config.SEPOLIA
+ } else {
+ return errors.New("chain id not recognized")
+ }
+
+ // Fetch the latest checkpoint from the network
+ checkpoint := checkpointFallback.FetchLatestCheckpoint(networkName)
+
+ // Sync using the inner struct's sync method
+ if err := inner.sync(checkpoint); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (in *Inner) New(rpcURL string, blockSend chan common.Block, finalizedBlockSend chan *common.Block, checkpointSend chan *[]byte, config *config.Config) *Inner {
+ rpcClient := rpc.NewConsensusRpc(rpcURL)
+
+ return &Inner{
+ RPC: rpcClient,
+ Store: LightClientStore{},
+ lastCheckpoint: nil, // No checkpoint initially
+ blockSend: blockSend,
+ finalizedBlockSend: finalizedBlockSend,
+ checkpointSend: checkpointSend,
+ Config: config,
+ }
+
+}
+func (in *Inner) Check_rpc() error {
+ chainID, err := in.RPC.ChainId()
+ if err != nil {
+ return err
+ }
+ if chainID != in.Config.Chain.ChainID {
+ return ErrIncorrectRpcNetwork
+ }
+ return nil
+}
+func (in *Inner) get_execution_payload(ctx context.Context, slot *uint64) (*consensus_core.ExecutionPayload, error) {
+ block, err := in.RPC.GetBlock(*slot)
+ if err != nil {
+ return nil, err
+ }
+
+ blockHash, err := utils.TreeHashRoot(block.Body.ToBytes())
+ if err != nil {
+ return nil, err
+ }
+ latestSlot := in.Store.OptimisticHeader.Slot
+ finalizedSlot := in.Store.FinalizedHeader.Slot
+
+ var verifiedBlockHash []byte
+ var errGettingBlockHash error
+
+ if *slot == latestSlot {
+ verifiedBlockHash, errGettingBlockHash = utils.TreeHashRoot(in.Store.OptimisticHeader.ToBytes())
+ if errGettingBlockHash != nil {
+ return nil, ErrPayloadNotFound
+ }
+ } else if *slot == finalizedSlot {
+ verifiedBlockHash, errGettingBlockHash = utils.TreeHashRoot(in.Store.FinalizedHeader.ToBytes())
+ if errGettingBlockHash != nil {
+ return nil, ErrPayloadNotFound
+ }
+ } else {
+ return nil, ErrPayloadNotFound
+ }
+
+ // Compare the hashes
+ if !bytes.Equal(verifiedBlockHash, blockHash) {
+ return nil, fmt.Errorf("%w: expected %v but got %v", ErrInvalidHeaderHash, verifiedBlockHash, blockHash)
+ }
+
+ payload := block.Body.ExecutionPayload
+ return &payload, nil
+}
+
+func (in *Inner) Get_payloads(ctx context.Context, startSlot, endSlot uint64) ([]interface{}, error) {
+ var payloads []interface{}
+
+ // Fetch the block at endSlot to get the initial parent hash
+ endBlock, err := in.RPC.GetBlock(endSlot)
+ if err != nil {
+ return nil, err
+ }
+ endPayload := endBlock.Body.ExecutionPayload
+ prevParentHash := endPayload.ParentHash
+
+ // Create a wait group to manage concurrent fetching
+ var wg sync.WaitGroup
+ payloadsChan := make(chan interface{}, endSlot-startSlot+1)
+ errorChan := make(chan error, 1) // Buffer for one error
+
+ // Fetch blocks in parallel
+ for slot := endSlot; slot >= startSlot; slot-- {
+ wg.Add(1)
+ go func(slot uint64) {
+ defer wg.Done()
+ block, err := in.RPC.GetBlock(slot)
+ if err != nil {
+ errorChan <- err
+ return
+ }
+ payload := block.Body.ExecutionPayload
+ if payload.ParentHash != prevParentHash {
+ log.Printf("Error while backfilling blocks: expected block hash %v but got %v", prevParentHash, payload.ParentHash)
+ return
+ }
+ prevParentHash = payload.ParentHash
+ payloadsChan <- payload
+ }(slot)
+ }
+
+ // Close channels after all fetches are complete
+ go func() {
+ wg.Wait()
+ close(payloadsChan)
+ close(errorChan)
+ }()
+
+ // Collect results and check for errors
+ for {
+ select {
+ case payload, ok := <-payloadsChan:
+ if !ok {
+ return payloads, nil
+ }
+ payloads = append(payloads, payload)
+ case err := <-errorChan:
+ return nil, err
+ }
+ }
+}
+func (in *Inner) advance() error {
+ // Fetch and apply finality update
+ finalityUpdate, err := in.RPC.GetFinalityUpdate()
+ if err != nil {
+ return err
+ }
+ if err := in.verify_finality_update(&finalityUpdate); err != nil {
+ return err
+ }
+ in.apply_finality_update(&finalityUpdate)
+
+ // Fetch and apply optimistic update
+ optimisticUpdate, err := in.RPC.GetOptimisticUpdate()
+ if err != nil {
+ return err
+ }
+ if err := in.verify_optimistic_update(&optimisticUpdate); err != nil {
+ return err
+ }
+ in.apply_optimistic_update(&optimisticUpdate)
+
+ // Check for sync committee update if it's not set
+ if in.Store.NextSyncCommitee == nil {
+ log.Printf("checking for sync committee update")
+
+ currentPeriod := utils.CalcSyncPeriod(in.Store.FinalizedHeader.Slot)
+ updates, err := in.RPC.GetUpdates(currentPeriod, 1)
+ if err != nil {
+ return err
+ }
+
+ if len(updates) == 1 {
+ update := updates[0]
+ if err := in.verify_update(&update); err == nil {
+ log.Printf("updating sync committee")
+ in.apply_update(&update)
+ }
+ }
+ }
+
+ return nil
+}
+func (in *Inner) sync(checkpoint [32]byte) error {
+ // Reset store and checkpoint
+ in.Store = LightClientStore{}
+ in.lastCheckpoint = nil
+
+ // Perform bootstrap with the given checkpoint
+ in.bootstrap(checkpoint)
+
+ // Calculate the current sync period
+ currentPeriod := utils.CalcSyncPeriod(in.Store.FinalizedHeader.Slot)
+
+ // Fetch updates
+ updates, err := in.RPC.GetUpdates(currentPeriod, MAX_REQUEST_LIGHT_CLIENT_UPDATES)
+ if err != nil {
+ return err
+ }
+
+ // Apply updates
+ for _, update := range updates {
+ if err := in.verify_update(&update); err != nil {
+ return err
+ }
+ in.apply_update(&update)
+ }
+
+ // Fetch and apply finality update
+ finalityUpdate, err := in.RPC.GetFinalityUpdate()
+ if err != nil {
+ return err
+ }
+ if err := in.verify_finality_update(&finalityUpdate); err != nil {
+ return err
+ }
+ in.apply_finality_update(&finalityUpdate)
+
+ // Fetch and apply optimistic update
+
+ optimisticUpdate, err := in.RPC.GetOptimisticUpdate()
+ if err != nil {
+ return err
+ }
+ if err := in.verify_optimistic_update(&optimisticUpdate); err != nil {
+ return err
+ }
+ in.apply_optimistic_update(&optimisticUpdate)
+
+ // Log the success message
+ log.Printf("consensus client in sync with checkpoint: 0x%s", hex.EncodeToString(checkpoint[:]))
+
+ return nil
+}
+func (in *Inner) send_blocks() error {
+ // Get slot from the optimistic header
+ slot := in.Store.OptimisticHeader.Slot
+ payload, err := in.get_execution_payload(context.Background(), &slot)
+ if err != nil {
+ return err
+ }
+
+ // Get finalized slot from the finalized header
+ finalizedSlot := in.Store.FinalizedHeader.Slot
+ finalizedPayload, err := in.get_execution_payload(context.Background(), &finalizedSlot)
+ if err != nil {
+ return err
+ }
+
+ // Send payload converted to block over the BlockSend channel
+ go func() {
+ block, err := PayloadToBlock(payload)
+ if err != nil {
+ log.Printf("Error converting payload to block: %v", err)
+ return
+ }
+ in.blockSend <- *block
+ }()
+
+ go func() {
+ block, err := PayloadToBlock(finalizedPayload)
+ if err != nil {
+ log.Printf("Error converting finalized payload to block: %v", err)
+ return
+ }
+ in.finalizedBlockSend <- block
+ }()
+
+ // Send checkpoint over the CheckpointSend channel
+ go func() {
+ in.checkpointSend <- in.lastCheckpoint
+ }()
+
+ return nil
+}
+
+// / Gets the duration until the next update
+// / Updates are scheduled for 4 seconds into each slot
+func (in *Inner) duration_until_next_update() time.Duration {
+ currentSlot := in.expected_current_slot()
+ nextSlot := currentSlot + 1
+ nextSlotTimestamp := nextSlot*12 + in.Config.Chain.GenesisTime
+
+ now := uint64(time.Now().Unix())
+ timeToNextSlot := int64(nextSlotTimestamp - now)
+ nextUpdate := timeToNextSlot + 4
+
+ return time.Duration(nextUpdate) * time.Second
+}
+func (in *Inner) bootstrap(checkpoint [32]byte) {
+
+ bootstrap, errInBootstrap := in.RPC.GetBootstrap(checkpoint)
+ if errInBootstrap != nil {
+ log.Printf("failed to fetch bootstrap: %v", errInBootstrap)
+ return
+ }
+
+ isValid := in.is_valid_checkpoint(bootstrap.Header.Slot)
+ if !isValid {
+ if in.Config.StrictCheckpointAge {
+ log.Printf("checkpoint too old, consider using a more recent checkpoint")
+ return
+ } else {
+ log.Printf("checkpoint too old, consider using a more recent checkpoint")
+ }
+ }
+
+ verify_bootstrap(checkpoint, bootstrap)
+ apply_bootstrap(&in.Store, bootstrap)
+
+}
+func verify_bootstrap(checkpoint [32]byte, bootstrap consensus_core.Bootstrap) {
+ isCommitteValid := isCurrentCommitteeProofValid(&bootstrap.Header, &bootstrap.CurrentSyncCommittee, bootstrap.CurrentSyncCommitteeBranch)
+ if !isCommitteValid {
+ log.Println("invalid current sync committee proof")
+ return
+ }
+
+ headerHash, err := utils.TreeHashRoot(bootstrap.Header.ToBytes())
+ if err != nil {
+ log.Println("failed to hash header")
+ return
+ }
+ HeaderValid := bytes.Equal(headerHash[:], checkpoint[:])
+
+ if !HeaderValid {
+ log.Println("invalid header hash")
+ return
+ }
+
+}
+
+func apply_bootstrap(store *LightClientStore, bootstrap consensus_core.Bootstrap) {
+ store.FinalizedHeader = bootstrap.Header
+ store.CurrentSyncCommitee = bootstrap.CurrentSyncCommittee
+ store.NextSyncCommitee = nil
+ store.OptimisticHeader = bootstrap.Header
+ store.PreviousMaxActiveParticipants = 0
+ store.CurrentMaxActiveParticipants = 0
+
+}
+
+func (in *Inner) verify_generic_update(update *GenericUpdate, expectedCurrentSlot uint64, store *LightClientStore, genesisRoots []byte, forks consensus_core.Forks) error {
+ {
+ bits := getBits(update.SyncAggregate.SyncCommitteeBits)
+ if bits == 0 {
+ return ErrInsufficientParticipation
+ }
+
+ updateFinalizedSlot := update.FinalizedHeader.Slot
+ validTime := expectedCurrentSlot >= update.SignatureSlot &&
+ update.SignatureSlot > update.AttestedHeader.Slot &&
+ update.AttestedHeader.Slot >= updateFinalizedSlot
+
+ if !validTime {
+ return ErrInvalidTimestamp
+ }
+
+ storePeriod := utils.CalcSyncPeriod(store.FinalizedHeader.Slot)
+ updateSigPeriod := utils.CalcSyncPeriod(update.SignatureSlot)
+
+ var validPeriod bool
+ if store.NextSyncCommitee != nil {
+ validPeriod = updateSigPeriod == storePeriod || updateSigPeriod == storePeriod+1
+ } else {
+ validPeriod = updateSigPeriod == storePeriod
+ }
+
+ if !validPeriod {
+ return ErrInvalidPeriod
+ }
+
+ updateAttestedPeriod := utils.CalcSyncPeriod(update.AttestedHeader.Slot)
+ updateHasNextCommittee := store.NextSyncCommitee == nil && update.NextSyncCommittee != nil && updateAttestedPeriod == storePeriod
+
+ if update.AttestedHeader.Slot <= store.FinalizedHeader.Slot && !updateHasNextCommittee {
+ return ErrNotRelevant
+ }
+
+ // Validate finalized header and finality branch
+ if update.FinalizedHeader != (consensus_core.Header{}) && update.FinalityBranch != nil {
+ if !isFinalityProofValid(&update.AttestedHeader, &update.FinalizedHeader, update.FinalityBranch) {
+ return ErrInvalidFinalityProof
+ }
+ } else if update.FinalizedHeader != (consensus_core.Header{}) {
+ return ErrInvalidFinalityProof
+ }
+
+ // Validate next sync committee and its branch
+ if update.NextSyncCommittee != nil && update.NextSyncCommitteeBranch != nil {
+ if !isNextCommitteeProofValid(&update.AttestedHeader, update.NextSyncCommittee, *update.NextSyncCommitteeBranch) {
+ return ErrInvalidNextSyncCommitteeProof
+ }
+ } else if update.NextSyncCommittee != nil {
+ return ErrInvalidNextSyncCommitteeProof
+ }
+
+ // Set sync committee based on updateSigPeriod
+ var syncCommittee *consensus_core.SyncCommittee
+ if updateSigPeriod == storePeriod {
+ syncCommittee = &in.Store.CurrentSyncCommitee
+ } else {
+ syncCommittee = in.Store.NextSyncCommitee
+ }
+ pks, err := utils.GetParticipatingKeys(syncCommittee, update.SyncAggregate.SyncCommitteeBits)
+ if err != nil {
+ return fmt.Errorf("failed to get participating keys: %w", err)
+ }
+
+ forkVersion := utils.CalculateForkVersion(&forks, update.SignatureSlot)
+ forkDataRoot := utils.ComputeForkDataRoot(forkVersion, consensus_core.Bytes32(in.Config.Chain.GenesisRoot))
+
+ if !verifySyncCommitteeSignature(pks, &update.AttestedHeader, &update.SyncAggregate.SyncCommitteeSignature, forkDataRoot) {
+ return ErrInvalidSignature
+ }
+
+ return nil
+ }
+}
+func (in *Inner) verify_update(update *consensus_core.Update) error {
+ genUpdate := GenericUpdate{
+ AttestedHeader: update.AttestedHeader,
+ SyncAggregate: update.SyncAggregate,
+ SignatureSlot: update.SignatureSlot,
+ NextSyncCommittee: &update.NextSyncCommittee,
+ NextSyncCommitteeBranch: &update.NextSyncCommitteeBranch,
+ FinalizedHeader: update.FinalizedHeader,
+ FinalityBranch: update.FinalityBranch,
+ }
+ return in.verify_generic_update(&genUpdate, in.expected_current_slot(), &in.Store, in.Config.Chain.GenesisRoot, in.Config.Forks)
+}
+func (in *Inner) verify_finality_update(update *consensus_core.FinalityUpdate) error {
+ genUpdate := GenericUpdate{
+ AttestedHeader: update.AttestedHeader,
+ SyncAggregate: update.SyncAggregate,
+ SignatureSlot: update.SignatureSlot,
+ FinalizedHeader: update.FinalizedHeader,
+ FinalityBranch: update.FinalityBranch,
+ }
+ return in.verify_generic_update(&genUpdate, in.expected_current_slot(), &in.Store, in.Config.Chain.GenesisRoot, in.Config.Forks)
+}
+func (in *Inner) verify_optimistic_update(update *consensus_core.OptimisticUpdate) error {
+ genUpdate := GenericUpdate{
+ AttestedHeader: update.AttestedHeader,
+ SyncAggregate: update.SyncAggregate,
+ SignatureSlot: update.SignatureSlot,
+ }
+ return in.verify_generic_update(&genUpdate, in.expected_current_slot(), &in.Store, in.Config.Chain.GenesisRoot, in.Config.Forks)
+}
+func (in *Inner) apply_generic_update(store *LightClientStore, update *GenericUpdate) *[]byte {
+ committeeBits := getBits(update.SyncAggregate.SyncCommitteeBits)
+
+ // Update max active participants
+ if committeeBits > store.CurrentMaxActiveParticipants {
+ store.CurrentMaxActiveParticipants = committeeBits
+ }
+
+ // Determine if we should update the optimistic header
+ shouldUpdateOptimistic := committeeBits > in.safety_threshold() &&
+ update.AttestedHeader.Slot > store.OptimisticHeader.Slot
+
+ if shouldUpdateOptimistic {
+ store.OptimisticHeader = update.AttestedHeader
+ }
+
+ updateAttestedPeriod := utils.CalcSyncPeriod(update.AttestedHeader.Slot)
+
+ updateFinalizedSlot := uint64(0)
+ if update.FinalizedHeader != (consensus_core.Header{}) {
+ updateFinalizedSlot = update.FinalizedHeader.Slot
+ }
+ updateFinalizedPeriod := utils.CalcSyncPeriod(updateFinalizedSlot)
+
+ updateHasFinalizedNextCommittee := in.Store.NextSyncCommitee == nil &&
+ in.has_sync_update(update) && in.has_finality_update(update) &&
+ updateFinalizedPeriod == updateAttestedPeriod
+
+ // Determine if we should apply the update
+ hasMajority := committeeBits*3 >= 512*2
+ if !hasMajority {
+ log.Println("skipping block with low vote count")
+ }
+
+ updateIsNewer := updateFinalizedSlot > store.FinalizedHeader.Slot
+ goodUpdate := updateIsNewer || updateHasFinalizedNextCommittee
+
+ shouldApplyUpdate := hasMajority && goodUpdate
+
+ // Apply the update if conditions are met
+ if shouldApplyUpdate {
+ storePeriod := utils.CalcSyncPeriod(store.FinalizedHeader.Slot)
+
+ // Sync committee update logic
+ if store.NextSyncCommitee == nil {
+ store.NextSyncCommitee = update.NextSyncCommittee
+ } else if updateFinalizedPeriod == storePeriod+1 {
+ log.Println("sync committee updated")
+ store.CurrentSyncCommitee = *store.NextSyncCommitee
+ store.NextSyncCommitee = update.NextSyncCommittee
+ store.PreviousMaxActiveParticipants = store.CurrentMaxActiveParticipants
+ store.CurrentMaxActiveParticipants = 0
+ }
+
+ // Update finalized header
+ if updateFinalizedSlot > store.FinalizedHeader.Slot {
+ store.FinalizedHeader = update.FinalizedHeader
+
+ if store.FinalizedHeader.Slot > store.OptimisticHeader.Slot {
+ store.OptimisticHeader = store.FinalizedHeader
+ }
+
+ if store.FinalizedHeader.Slot%32 == 0 {
+ checkpoint, err := utils.TreeHashRoot(store.FinalizedHeader.ToBytes())
+ if err != nil {
+ return nil
+ }
+ return &checkpoint
+ }
+ }
+ }
+
+ return nil
+}
+func (in *Inner) apply_update(update *consensus_core.Update) {
+ genUpdate := GenericUpdate{
+ AttestedHeader: update.AttestedHeader,
+ SyncAggregate: update.SyncAggregate,
+ SignatureSlot: update.SignatureSlot,
+ NextSyncCommittee: &update.NextSyncCommittee,
+ NextSyncCommitteeBranch: &update.NextSyncCommitteeBranch,
+ FinalizedHeader: update.FinalizedHeader,
+ FinalityBranch: update.FinalityBranch,
+ }
+ checkpoint := in.apply_generic_update(&in.Store, &genUpdate)
+ if checkpoint != nil {
+ in.lastCheckpoint = checkpoint
+ }
+}
+func (in *Inner) apply_finality_update(update *consensus_core.FinalityUpdate) {
+ genUpdate := GenericUpdate{
+ AttestedHeader: update.AttestedHeader,
+ SyncAggregate: update.SyncAggregate,
+ SignatureSlot: update.SignatureSlot,
+ FinalizedHeader: update.FinalizedHeader,
+ FinalityBranch: update.FinalityBranch,
+ }
+ checkpoint := in.apply_generic_update(&in.Store, &genUpdate)
+ if checkpoint != nil {
+ in.lastCheckpoint = checkpoint
+ }
+}
+func (in *Inner) apply_optimistic_update(update *consensus_core.OptimisticUpdate) {
+ genUpdate := GenericUpdate{
+ AttestedHeader: update.AttestedHeader,
+ SyncAggregate: update.SyncAggregate,
+ SignatureSlot: update.SignatureSlot,
+ }
+ checkpoint := in.apply_generic_update(&in.Store, &genUpdate)
+ if checkpoint != nil {
+ in.lastCheckpoint = checkpoint
+ }
+}
+func (in *Inner) Log_finality_update(update *consensus_core.FinalityUpdate) {
+ participation := float32(getBits(update.SyncAggregate.SyncCommitteeBits)) / 512.0 * 100.0
+ decimals := 2
+ if participation == 100.0 {
+ decimals = 1
+ }
+
+ age := in.Age(in.Store.FinalizedHeader.Slot)
+ days := age.Hours() / 24
+ hours := int(age.Hours()) % 24
+ minutes := int(age.Minutes()) % 60
+ seconds := int(age.Seconds()) % 60
+
+ log.Printf(
+ "finalized slot slot=%d confidence=%.*f%% age=%02d:%02d:%02d:%02d",
+ in.Store.FinalizedHeader.Slot, decimals, participation, int(days), hours, minutes, seconds,
+ )
+}
+func (in *Inner) Log_optimistic_update(update *consensus_core.OptimisticUpdate) {
+ participation := float32(getBits(update.SyncAggregate.SyncCommitteeBits)) / 512.0 * 100.0
+ decimals := 2
+ if participation == 100.0 {
+ decimals = 1
+ }
+
+ age := in.Age(in.Store.OptimisticHeader.Slot)
+ days := age.Hours() / 24
+ hours := int(age.Hours()) % 24
+ minutes := int(age.Minutes()) % 60
+ seconds := int(age.Seconds()) % 60
+
+ log.Printf(
+ "updated head slot=%d confidence=%.*f%% age=%02d:%02d:%02d:%02d",
+ in.Store.OptimisticHeader.Slot, decimals, participation, int(days), hours, minutes, seconds,
+ )
+}
+func (in *Inner) has_finality_update(update *GenericUpdate) bool {
+ return update.FinalizedHeader != (consensus_core.Header{}) && update.FinalityBranch != nil
+}
+func (in *Inner) has_sync_update(update *GenericUpdate) bool {
+ return update.NextSyncCommittee != nil && update.NextSyncCommitteeBranch != nil
+}
+func (in *Inner) safety_threshold() uint64 {
+ return max(in.Store.CurrentMaxActiveParticipants, in.Store.PreviousMaxActiveParticipants) / 2
+}
+
+// verifySyncCommitteeSignature verifies the sync committee signature.
+func verifySyncCommitteeSignature(
+ pks []consensus_core.BLSPubKey, // Public keys slice
+ attestedHeader *consensus_core.Header, // Attested header
+ signature *consensus_core.SignatureBytes, // Signature bytes
+ forkDataRoot consensus_core.Bytes32, // Fork data root
+) bool {
+ // Collect public keys as references (or suitable Go struct)
+ collectedPks := make([]*consensus_core.BLSPubKey, len(pks))
+ for i := range pks {
+ collectedPks[i] = &pks[i]
+ }
+
+ // Compute headerRoot
+ headerRoot, err := utils.TreeHashRoot(attestedHeader.ToBytes())
+ if err != nil {
+ return false
+ }
+ var headerRootBytes consensus_core.Bytes32
+ copy(headerRootBytes[:], headerRoot[:])
+ // Compute signingRoot
+ signingRoot := ComputeCommitteeSignRoot(headerRootBytes, forkDataRoot)
+ var g2Points []*bls.G2Point
+ for _, pk := range collectedPks {
+ var g2Point bls.G2Point
+ errWhileCoonvertingtoG2 := g2Point.Unmarshal(pk[:])
+
+ if errWhileCoonvertingtoG2 != nil {
+ return false
+ }
+ }
+
+ return utils.IsAggregateValid(*signature, signingRoot, g2Points)
+}
+
+func ComputeCommitteeSignRoot(header consensus_core.Bytes32, fork consensus_core.Bytes32) consensus_core.Bytes32 {
+ // Domain type for the sync committee
+ domainType := [4]byte{7, 0, 0, 0}
+
+ // Compute the domain
+ domain := utils.ComputeDomain(domainType, fork)
+
+ // Compute and return the signing root
+ return utils.ComputeSigningRoot(header, domain)
+}
+func (in *Inner) Age(slot uint64) time.Duration {
+ expectedTime := slot*12 + in.Config.Chain.GenesisTime
+ now := time.Now().Unix()
+ return time.Duration(uint64(now)-expectedTime) * time.Second
+}
+func (in *Inner) expected_current_slot() uint64 {
+ const SLOT_DURATION = 12
+
+ now := time.Now().Unix()
+
+ sinceGenesis := now - int64(in.Config.Chain.GenesisTime)
+ return uint64(sinceGenesis) / SLOT_DURATION
+}
+func (in *Inner) is_valid_checkpoint(blockHashSlot uint64) bool {
+ const SLOT_DURATION = 12
+ currentSlot := in.expected_current_slot()
+ currentSlotTimestamp := int64(in.Config.Chain.GenesisTime) + int64(currentSlot)*SLOT_DURATION
+ blockhashSlotTimestamp := int64(in.Config.Chain.GenesisTime) + int64(blockHashSlot)*SLOT_DURATION
+
+ slotAge := currentSlotTimestamp - blockhashSlotTimestamp
+ return uint64(slotAge) < in.Config.MaxCheckpointAge
+}
+
+func isFinalityProofValid(attestedHeader *consensus_core.Header, finalizedHeader *consensus_core.Header, finalityBranch []consensus_core.Bytes32) bool {
+ finalityBranchForProof, err := utils.BranchToNodes(finalityBranch)
+ if err != nil {
+ return false
+ }
+ return utils.IsProofValid(attestedHeader, finalizedHeader.ToBytes(), finalityBranchForProof, 6, 41)
+}
+
+func isCurrentCommitteeProofValid(attestedHeader *consensus_core.Header, currentCommittee *consensus_core.SyncCommittee, currentCommitteeBranch []consensus_core.Bytes32) bool {
+ CurrentCommitteeForProof, err := utils.BranchToNodes(currentCommitteeBranch)
+ if err != nil {
+ return false
+ }
+ return utils.IsProofValid(attestedHeader, currentCommittee.ToBytes(), CurrentCommitteeForProof, 5, 22)
+}
+
+func isNextCommitteeProofValid(attestedHeader *consensus_core.Header, currentCommittee *consensus_core.SyncCommittee, currentCommitteeBranch []consensus_core.Bytes32) bool {
+ currentCommitteeBranchForProof, err := utils.BranchToNodes(currentCommitteeBranch)
+ if err != nil {
+ return false
+ }
+ return utils.IsProofValid(attestedHeader, currentCommittee.ToBytes(), currentCommitteeBranchForProof, 5, 23)
+}
+
// PayloadToBlock converts a consensus-layer ExecutionPayload into the
// execution-layer common.Block representation, decoding every transaction
// carried in the payload. It fails on the first transaction that cannot be
// decoded.
func PayloadToBlock(value *consensus_core.ExecutionPayload) (*common.Block, error) {
	// Post-merge constants: zero block nonce and the hash of an empty uncle list.
	emptyNonce := "0x0000000000000000"
	emptyUncleHash := geth.HexToHash("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")

	// Allocate txs with the exact capacity needed.
	txs := make([]common.Transaction, 0, len(value.Transactions))

	// Decode each raw transaction in payload order; the loop index doubles
	// as the transaction index inside the block.
	// NOTE(review): processTransaction takes *[1073741824]byte while
	// ExecutionPayload.Transactions is declared elsewhere as a typed slice —
	// confirm the element type actually matches this call.
	for i := range value.Transactions {
		tx, err := processTransaction(&value.Transactions[i], value.BlockHash, &value.BlockNumber, uint64(i))
		if err != nil {
			return nil, err
		}
		txs = append(txs, tx)
	}

	// Construct and return the block. Difficulty and TotalDifficulty are
	// fixed at zero (post-merge); Size and TransactionsRoot are left unset.
	return &common.Block{
		Number:           value.BlockNumber,
		BaseFeePerGas:    *uint256.NewInt(value.BaseFeePerGas),
		Difficulty:       *uint256.NewInt(0),
		ExtraData:        value.ExtraData[:],
		GasLimit:         value.GasLimit,
		GasUsed:          value.GasUsed,
		Hash:             value.BlockHash,
		LogsBloom:        value.LogsBloom[:],
		ParentHash:       value.ParentHash,
		ReceiptsRoot:     value.ReceiptsRoot,
		StateRoot:        value.StateRoot,
		Timestamp:        value.Timestamp,
		TotalDifficulty:  uint64(0),
		Transactions:     common.Transactions{Full: txs},
		MixHash:          value.PrevRandao,
		Nonce:            emptyNonce,
		Sha3Uncles:       emptyUncleHash,
		Size:             0,
		TransactionsRoot: [32]byte{},
		Uncles:           [][32]byte{},
		BlobGasUsed:      value.BlobGasUsed,
		ExcessBlobGas:    value.ExcessBlobGas,
	}, nil
}
+
// processTransaction decodes one RLP-encoded transaction from the payload
// buffer and converts it into the common.Transaction representation,
// recovering the sender address and copying type-specific fee/blob fields.
func processTransaction(txBytes *[1073741824]byte, blockHash consensus_core.Bytes32, blockNumber *uint64, index uint64) (common.Transaction, error) {
	// Decode the transaction envelope (RLP-encoded).
	txEnvelope, err := DecodeTxEnvelope(txBytes)
	if err != nil {
		return common.Transaction{}, fmt.Errorf("failed to decode transaction: %v", err)
	}

	// Base fields common to every transaction type.
	tx := common.Transaction{
		Hash:             txEnvelope.Hash(),
		Nonce:            txEnvelope.Nonce(),
		BlockHash:        blockHash,
		BlockNumber:      blockNumber,
		TransactionIndex: index,
		To:               txEnvelope.To(),
		Value:            txEnvelope.Value(),
		GasPrice:         txEnvelope.GasPrice(),
		Gas:              txEnvelope.Gas(),
		Input:            txEnvelope.Data(),
		ChainID:          txEnvelope.ChainId(),
		TransactionType:  txEnvelope.Type(),
	}

	// Recover the sender from the signature using the latest signer for the
	// transaction's chain ID.
	signer := types.LatestSignerForChainID(txEnvelope.ChainId())
	from, err := types.Sender(signer, txEnvelope)
	if err != nil {
		return common.Transaction{}, fmt.Errorf("failed to recover sender: %v", err)
	}
	tx.From = from.Hex()

	// Extract signature components.
	// NOTE(review): YParity is derived as v == 1, which only holds for typed
	// (EIP-2930/1559/4844) transactions; legacy transactions carry v = 27/28
	// or an EIP-155 value — confirm callers never rely on YParity for legacy
	// transactions.
	r, s, v := txEnvelope.RawSignatureValues()
	tx.Signature = &common.Signature{
		R:       r.String(),
		S:       s.String(),
		V:       v.Uint64(),
		YParity: common.Parity{Value: v.Uint64() == 1},
	}

	// Copy fields that only exist on specific transaction types.
	switch txEnvelope.Type() {
	case types.AccessListTxType:
		tx.AccessList = txEnvelope.AccessList()
	case types.DynamicFeeTxType:
		tx.MaxFeePerGas = new(big.Int).Set(txEnvelope.GasFeeCap())
		tx.MaxPriorityFeePerGas = new(big.Int).Set(txEnvelope.GasTipCap())
	case types.BlobTxType:
		tx.MaxFeePerGas = new(big.Int).Set(txEnvelope.GasFeeCap())
		tx.MaxPriorityFeePerGas = new(big.Int).Set(txEnvelope.GasTipCap())
		tx.MaxFeePerBlobGas = new(big.Int).Set(txEnvelope.BlobGasFeeCap())
		tx.BlobVersionedHashes = txEnvelope.BlobHashes()
	default:
		// Legacy transactions fall through here; no extra fields to copy.
		fmt.Println("Unhandled transaction type")
	}

	return tx, nil
}
+
// getBits counts the number of bits set to 1 in a 64-byte bitfield.
func getBits(bitfield [64]byte) uint64 {
	var total uint64
	for _, octet := range bitfield {
		// Kernighan's trick: each step clears the lowest set bit.
		for octet != 0 {
			octet &= octet - 1
			total++
		}
	}
	return total
}
+
// popCount counts the number of bits set to 1 in a byte.
func popCount(b byte) int {
	// Kernighan's algorithm: one iteration per set bit.
	n := 0
	for ; b != 0; n++ {
		b &= b - 1
	}
	return n
}
+
+// DecodeTxEnvelope takes the transaction bytes and decodes them into a transaction envelope (Ethereum transaction)
+func DecodeTxEnvelope(txBytes *[1073741824]byte) (*types.Transaction, error) {
+ // Create an empty transaction object
+ var tx types.Transaction
+
+ var txBytesForUnmarshal []byte
+ for _, b := range txBytes {
+ if b == 0 {
+ break
+ }
+ txBytesForUnmarshal = append(txBytesForUnmarshal, b)
+ }
+
+ // Unmarshal the RLP-encoded transaction bytes into the transaction object
+ err := tx.UnmarshalBinary(txBytesForUnmarshal)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode transaction: %v", err)
+ }
+
+ return &tx, nil
+}
+
+func SomeGasPrice(gasFeeCap, gasTipCap *big.Int, baseFeePerGas uint64) *big.Int {
+ // Create a new big.Int for the base fee and set its value
+ baseFee := new(big.Int).SetUint64(baseFeePerGas)
+
+ // Calculate the maximum gas price based on the provided parameters
+ maxGasPrice := new(big.Int).Set(gasFeeCap)
+
+ // Calculate the alternative gas price
+ alternativeGasPrice := new(big.Int).Set(gasTipCap)
+ alternativeGasPrice.Add(alternativeGasPrice, baseFee)
+
+ // Return the maximum of the two
+ if maxGasPrice.Cmp(alternativeGasPrice) < 0 {
+ return alternativeGasPrice
+ }
+ return maxGasPrice
+}
diff --git a/consensus/consensus_core/consensus_core.go b/consensus/consensus_core/consensus_core.go
index bb6a8f9..aab009b 100644
--- a/consensus/consensus_core/consensus_core.go
+++ b/consensus/consensus_core/consensus_core.go
@@ -1,29 +1,42 @@
package consensus_core
-type BeaconBlock struct {
- Slot uint64
- Proposer_index uint64
- Parent_root [32]byte
- StateRoot [32]byte
- Body BeaconBlockBody
-}
+
+import (
+ "bytes"
+
+ "github.com/BlocSoc-iitr/selene/consensus/types"
+ "github.com/ugorji/go/codec"
+)
+
type Bytes32 [32]byte
type BLSPubKey [48]byte
type Address [20]byte
-type LogsBloom = [256]byte
+type LogsBloom [256]byte
type SignatureBytes [96]byte
+
+type BeaconBlock struct {
+ Slot uint64
+ ProposerIndex uint64
+ ParentRoot Bytes32
+ StateRoot Bytes32
+ Body BeaconBlockBody
+}
+
type Eth1Data struct {
- Deposit_root Bytes32
- Deposit_count uint64
- Block_hash Bytes32
+ DepositRoot Bytes32
+ DepositCount uint64
+ BlockHash Bytes32
}
+
type ProposerSlashing struct {
SignedHeader1 SignedBeaconBlockHeader
SignedHeader2 SignedBeaconBlockHeader
}
+
type SignedBeaconBlockHeader struct {
Message BeaconBlockHeader
Signature SignatureBytes
}
+
type BeaconBlockHeader struct {
Slot uint64
ProposerIndex uint64
@@ -31,15 +44,18 @@ type BeaconBlockHeader struct {
StateRoot Bytes32
BodyRoot Bytes32
}
+
type AttesterSlashing struct {
Attestation1 IndexedAttestation
Attestation2 IndexedAttestation
}
+
type IndexedAttestation struct {
- AttestingIndices []uint64 //max length 2048 to be ensured
+ AttestingIndices [2048]uint64
Data AttestationData
Signature SignatureBytes
}
+
type AttestationData struct {
Slot uint64
Index uint64
@@ -47,45 +63,55 @@ type AttestationData struct {
Source Checkpoint
Target Checkpoint
}
+
type Checkpoint struct {
Epoch uint64
Root Bytes32
}
+
type Bitlist []bool
+
type Attestation struct {
AggregationBits Bitlist `ssz-max:"2048"`
Data AttestationData
Signature SignatureBytes
}
+
type Deposit struct {
- Proof [33]Bytes32 //fixed size array
+ Proof [33]Bytes32 // fixed size array
Data DepositData
}
+
type DepositData struct {
Pubkey [48]byte
WithdrawalCredentials Bytes32
Amount uint64
Signature SignatureBytes
}
+
type SignedVoluntaryExit struct {
Message VoluntaryExit
Signature SignatureBytes
}
+
type VoluntaryExit struct {
Epoch uint64
ValidatorIndex uint64
}
+
type SyncAggregate struct {
SyncCommitteeBits [64]byte
SyncCommitteeSignature SignatureBytes
}
+
type Withdrawal struct {
Index uint64
ValidatorIndex uint64
Address Address
Amount uint64
}
-type ExecutionPayload struct { //not implemented
+
+type ExecutionPayload struct {
ParentHash Bytes32
FeeRecipient Address
StateRoot Bytes32
@@ -99,33 +125,38 @@ type ExecutionPayload struct { //not implemented
ExtraData [32]byte
BaseFeePerGas uint64
BlockHash Bytes32
- Transactions []byte //max length 1073741824 to be implemented
- Withdrawals []Withdrawal //max length 16 to be implemented
- BlobGasUsed uint64
- ExcessBlobGas uint64
+ Transactions []types.Transaction `ssz-max:"1048576"`
+ Withdrawals []types.Withdrawal `ssz-max:"16"`
+ BlobGasUsed *uint64 // Deneb-specific field, use pointer for optionality
+ ExcessBlobGas *uint64 // Deneb-specific field, use pointer for optionality
}
+
type SignedBlsToExecutionChange struct {
Message BlsToExecutionChange
Signature SignatureBytes
}
+
type BlsToExecutionChange struct {
ValidatorIndex uint64
FromBlsPubkey [48]byte
}
-type BeaconBlockBody struct { //not implemented
+
+// BeaconBlockBody represents the body of a beacon block.
+type BeaconBlockBody struct {
RandaoReveal SignatureBytes
Eth1Data Eth1Data
Graffiti Bytes32
- ProposerSlashings []ProposerSlashing //max length 16 to be insured how?
- AttesterSlashings []AttesterSlashing //max length 2 to be ensured
- Attestations []Attestation //max length 128 to be ensured
- Deposits []Deposit //max length 16 to be ensured
- VoluntaryExits SignedVoluntaryExit
+ ProposerSlashings []ProposerSlashing `ssz-max:"16"`
+ AttesterSlashings []AttesterSlashing `ssz-max:"2"`
+ Attestations []Attestation `ssz-max:"128"`
+ Deposits []Deposit `ssz-max:"16"`
+ VoluntaryExits []SignedVoluntaryExit `ssz-max:"16"`
SyncAggregate SyncAggregate
ExecutionPayload ExecutionPayload
- BlsToExecutionChanges []SignedBlsToExecutionChange //max length 16 to be ensured
- BlobKzgCommitments [][48]byte //max length 4096 to be ensured
+ BlsToExecutionChanges []SignedBlsToExecutionChange `ssz-max:"16"`
+ BlobKzgCommitments [][48]byte `ssz-max:"4096"`
}
+
type Header struct {
Slot uint64
ProposerIndex uint64
@@ -133,19 +164,22 @@ type Header struct {
StateRoot Bytes32
BodyRoot Bytes32
}
-type SyncComittee struct {
+
+type SyncCommittee struct {
Pubkeys [512]BLSPubKey
AggregatePubkey BLSPubKey
}
+
type Update struct {
AttestedHeader Header
- NextSyncCommittee SyncComittee
+ NextSyncCommittee SyncCommittee
NextSyncCommitteeBranch []Bytes32
FinalizedHeader Header
- SinalityBranch []Bytes32
+ FinalityBranch []Bytes32
SyncAggregate SyncAggregate
SignatureSlot uint64
}
+
type FinalityUpdate struct {
AttestedHeader Header
FinalizedHeader Header
@@ -153,13 +187,50 @@ type FinalityUpdate struct {
SyncAggregate SyncAggregate
SignatureSlot uint64
}
+
type OptimisticUpdate struct {
AttestedHeader Header
SyncAggregate SyncAggregate
SignatureSlot uint64
}
+
type Bootstrap struct {
Header Header
- CurrentSyncCommittee SyncComittee
+ CurrentSyncCommittee SyncCommittee
CurrentSyncCommitteeBranch []Bytes32
}
+
// Fork pins a fork version to the epoch at which it activates.
type Fork struct {
	Epoch       uint64 `json:"epoch"`
	ForkVersion []byte `json:"fork_version"`
}

// Forks lists the activation data for every consensus fork this client knows.
type Forks struct {
	Genesis   Fork `json:"genesis"`
	Altair    Fork `json:"altair"`
	Bellatrix Fork `json:"bellatrix"`
	Capella   Fork `json:"capella"`
	Deneb     Fork `json:"deneb"`
}
+
// ToBytes serializes the Header struct to a byte slice.
// NOTE(review): this is a JSON encoding via the ugorji codec, not SSZ;
// callers hash this output, so the resulting roots are not canonical SSZ
// roots — confirm this is intentional.
func (h *Header) ToBytes() []byte {
	var buf bytes.Buffer
	enc := codec.NewEncoder(&buf, new(codec.JsonHandle))
	_ = enc.Encode(h) // encode error deliberately ignored; best-effort in-memory encode
	return buf.Bytes()
}
+
// ToBytes serializes the BeaconBlockBody struct to a byte slice.
// NOTE(review): JSON encoding, not SSZ — see the matching note on
// Header.ToBytes; confirm downstream hashing expects this.
func (b *BeaconBlockBody) ToBytes() []byte {
	var buf bytes.Buffer
	enc := codec.NewEncoder(&buf, new(codec.JsonHandle))
	_ = enc.Encode(b) // encode error deliberately ignored; best-effort in-memory encode
	return buf.Bytes()
}
+
// ToBytes serializes the SyncCommittee struct to a byte slice.
// NOTE(review): JSON encoding, not SSZ — confirm downstream hashing
// expects this rather than a canonical SSZ serialization.
func (sc *SyncCommittee) ToBytes() []byte {
	var buf bytes.Buffer
	enc := codec.NewEncoder(&buf, new(codec.JsonHandle))
	_ = enc.Encode(sc) // encode error deliberately ignored; best-effort in-memory encode
	return buf.Bytes()
}
diff --git a/consensus/database.go b/consensus/database.go
index 4ced62a..77993a8 100644
--- a/consensus/database.go
+++ b/consensus/database.go
@@ -6,7 +6,7 @@ import (
"path/filepath"
)
type Database interface {
- New(cfg *config.BaseConfig) (Database, error)
+ New(cfg *config.Config) (Database, error)
SaveCheckpoint(checkpoint []byte) error
LoadCheckpoint() ([]byte, error)
}
@@ -14,7 +14,7 @@ type FileDB struct {
DataDir string
defaultCheckpoint [32]byte
}
-func (f *FileDB) New(cfg *config.BaseConfig) (Database, error) {
+func (f *FileDB) New(cfg *config.Config) (Database, error) {
if cfg.DataDir == nil || *cfg.DataDir == "" {
return nil, errors.New("data directory is not set in the config")
}
@@ -46,7 +46,7 @@ func (f *FileDB) LoadCheckpoint() ([]byte, error) {
type ConfigDB struct {
checkpoint [32]byte
}
-func (c *ConfigDB) New(cfg *config.BaseConfig) (Database, error) {
+func (c *ConfigDB) New(cfg *config.Config) (Database, error) {
checkpoint := cfg.DefaultCheckpoint
if cfg.DataDir == nil {
return nil, errors.New("data directory is not set in the config")
diff --git a/consensus/rpc/consensus_rpc.go b/consensus/rpc/consensus_rpc.go
index 8395c79..22dc00e 100644
--- a/consensus/rpc/consensus_rpc.go
+++ b/consensus/rpc/consensus_rpc.go
@@ -6,7 +6,7 @@ import (
// Return types and parameters for this interface are not yet fully specified.
type ConsensusRpc interface {
- GetBootstrap(block_root []byte) (consensus_core.Bootstrap, error)
+ GetBootstrap(block_root [32]byte) (consensus_core.Bootstrap, error)
GetUpdates(period uint64, count uint8) ([]consensus_core.Update, error)
GetFinalityUpdate() (consensus_core.FinalityUpdate, error)
GetOptimisticUpdate() (consensus_core.OptimisticUpdate, error)
diff --git a/consensus/rpc/nimbus_rpc.go b/consensus/rpc/nimbus_rpc.go
index 1725b4e..49b66fa 100644
--- a/consensus/rpc/nimbus_rpc.go
+++ b/consensus/rpc/nimbus_rpc.go
@@ -3,11 +3,12 @@ package rpc
import (
"encoding/json"
"fmt"
- "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
"io"
"net/http"
"strconv"
"time"
+
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
)
// uses types package
@@ -43,15 +44,17 @@ func min(a uint8, b uint8) uint8 {
}
return b
}
+
type NimbusRpc struct {
//ConsensusRpc
rpc string
}
+
func NewNimbusRpc(rpc string) *NimbusRpc {
return &NimbusRpc{
rpc: rpc}
}
-func (n *NimbusRpc) GetBootstrap(block_root []byte) (consensus_core.Bootstrap, error) {
+func (n *NimbusRpc) GetBootstrap(block_root [32]byte) (consensus_core.Bootstrap, error) {
root_hex := fmt.Sprintf("%x", block_root)
req := fmt.Sprintf("%s/eth/v1/beacon/light_client/bootstrap/0x%s", n.rpc, root_hex)
var res BootstrapResponse
@@ -111,6 +114,7 @@ func (n *NimbusRpc) ChainId() (uint64, error) {
}
return res.Data.ChainId, nil
}
+
// BeaconBlock, Update,FinalityUpdate ,OptimisticUpdate,Bootstrap yet to be defined in consensus-core/src/types/mod.go
// For now defined in consensus/consensus_core.go
type BeaconBlockResponse struct {
@@ -138,4 +142,3 @@ type Spec struct {
type BootstrapResponse struct {
Data consensus_core.Bootstrap
}
-
diff --git a/consensus/utils.go b/consensus/utils.go
deleted file mode 100644
index 4e25578..0000000
--- a/consensus/utils.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package consensus
-
-// uses types package
-
-func calculate_sync_period() {}
-func is_aggregate_valid() {}
-func is_proof_valid() {}
-func compute_signing_root() {}
-func compute_domian() {}
-func compute_fork_data_root() {}
-func branch_to_nodes() {}
-func bytes32_to_nodes() {}
-
-type SigningData struct{}
-type ForkData struct{}
diff --git a/go.mod b/go.mod
index ddc3288..d3224ef 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,7 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
@@ -24,14 +25,17 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
+ github.com/stretchr/testify v1.9.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/supranational/blst v0.3.11 // indirect
+ github.com/ugorji/go/codec v1.2.12 // indirect
github.com/wealdtech/go-merkletree v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
diff --git a/go.sum b/go.sum
index 75d9c8c..602c81c 100644
--- a/go.sum
+++ b/go.sum
@@ -14,6 +14,8 @@ github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDF
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
@@ -40,6 +42,8 @@ github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
@@ -62,11 +66,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
+github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/wealdtech/go-merkletree v1.0.0 h1:DsF1xMzj5rK3pSQM6mPv8jlyJyHXhFxpnA2bwEjMMBY=
github.com/wealdtech/go-merkletree v1.0.0/go.mod h1:cdil512d/8ZC7Kx3bfrDvGMQXB25NTKbsm0rFrmDax4=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
diff --git a/utils/proof/merkleProof.go b/utils/proof/merkleProof.go
index 3610c11..b28bc54 100644
--- a/utils/proof/merkleProof.go
+++ b/utils/proof/merkleProof.go
@@ -6,7 +6,7 @@ import (
merkletree "github.com/wealdtech/go-merkletree"
)
-func validateMerkleProof(
+func ValidateMerkleProof(
root []byte,
leaf []byte,
proof *merkletree.Proof,
diff --git a/utils/utils.go b/utils/utils.go
new file mode 100644
index 0000000..c44317c
--- /dev/null
+++ b/utils/utils.go
@@ -0,0 +1,281 @@
+package utils
+
+import (
+ "encoding/hex"
+
+ "strings"
+
+ "bytes"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "log"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/BlocSoc-iitr/selene/consensus/consensus_core"
+ "github.com/BlocSoc-iitr/selene/utils/bls"
+
+ bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381"
+ "github.com/pkg/errors"
+ merkletree "github.com/wealdtech/go-merkletree"
+)
+
+// To export a function from this package, capitalise the first letter of its name.
// Hex_str_to_bytes decodes a hex string into bytes.
// The string may optionally carry a "0x" prefix; it is stripped before decoding.
// Returns an error if the remainder is not valid hex.
func Hex_str_to_bytes(s string) ([]byte, error) {
	decoded, decodeErr := hex.DecodeString(strings.TrimPrefix(s, "0x"))
	if decodeErr != nil {
		return nil, decodeErr
	}
	return decoded, nil
}
+
+func Address_to_hex_string(addr common.Address) string {
+ bytesArray := addr.Bytes()
+ return fmt.Sprintf("0x%x", hex.EncodeToString(bytesArray))
+}
+
// U64_to_hex_string formats val as a 0x-prefixed lowercase hex string
// with no zero padding (e.g. 255 -> "0xff", 0 -> "0x0").
func U64_to_hex_string(val uint64) string {
	// %#x emits the "0x" prefix itself, matching "0x%x" exactly.
	return fmt.Sprintf("%#x", val)
}
+
+func Bytes_deserialize(data []byte) ([]byte, error) {
+ var hexString string
+ if err := json.Unmarshal(data, &hexString); err != nil {
+ return nil, err
+ }
+
+ return Hex_str_to_bytes(hexString)
+}
+
// Bytes_serialize encodes a byte slice as a JSON string of bare hex
// (no 0x prefix). A nil slice serializes to JSON null so that
// nil/non-nil round-trips losslessly.
func Bytes_serialize(bytes []byte) ([]byte, error) {
	if bytes == nil {
		return json.Marshal(nil)
	}
	return json.Marshal(hex.EncodeToString(bytes))
}
+
+// TreeHashRoot computes the Merkle root from the provided leaves in a flat []byte slice.
+// TreeHashRoot calculates the root hash from the input data.
+func TreeHashRoot(data []byte) ([]byte, error) {
+ // Convert the input data into a slice of leaves
+ leaves, err := bytesToLeaves(data)
+ if err != nil {
+ return nil, fmt.Errorf("error converting bytes to leaves: %w", err)
+ }
+
+ // Create the Merkle tree using the leaves
+ tree, errorCreatingMerkleTree := merkletree.New(leaves)
+ if errorCreatingMerkleTree != nil {
+ return nil, fmt.Errorf("error creating Merkle tree: %w", err)
+ }
+
+ // Fetch the root hash of the tree
+ root := tree.Root()
+ if root == nil {
+ return nil, errors.New("failed to calculate the Merkle root: root is nil")
+ }
+
+ return root, nil
+}
+
// bytesToLeaves unmarshals a JSON array of base64-encoded byte strings
// into a slice of leaf byte slices. A JSON null yields a nil slice.
func bytesToLeaves(data []byte) ([][]byte, error) {
	var leaves [][]byte
	if unmarshalErr := json.Unmarshal(data, &leaves); unmarshalErr != nil {
		return nil, unmarshalErr
	}
	return leaves, nil
}
+
// CalcSyncPeriod returns the sync-committee period containing slot:
// 32 slots per epoch and 256 epochs per period, so 8192 slots per period.
func CalcSyncPeriod(slot uint64) uint64 {
	// Dividing by 32 then 256 equals dividing by 8192 for unsigned ints.
	const slotsPerPeriod = 32 * 256
	return slot / slotsPerPeriod
}
+
// IsAggregateValid reports whether sigBytes is a valid BLS aggregate
// signature over the 32-byte message msg for the given public keys.
// It fails closed: any deserialization or pairing error returns false.
// NOTE(review): here signatures are G1 points and public keys are G2
// points — confirm this matches the convention used by the bls helper
// package and the rest of the consensus code.
func IsAggregateValid(sigBytes consensus_core.SignatureBytes, msg [32]byte, pks []*bls.G2Point) bool {
	// Copy into a fixed 96-byte buffer expected by the G1 decoder.
	var sigInBytes [96]byte
	copy(sigInBytes[:], sigBytes[:])
	// Deserialize the signature from bytes; an invalid encoding fails closed.
	var sig bls12381.G1Affine
	if err := sig.Unmarshal(sigInBytes[:]); err != nil {
		return false
	}

	// Hash/map the message to a G1 point on the curve.
	msgPoint := bls.MapToCurve(msg)

	// Aggregate (sum) the participating public keys into one G2 point.
	aggPubKey := bls.AggregatePublicKeys(pks)

	// Pairing-check inputs: e(H(msg), aggPubKey) * e(sig, g2) == 1.
	// The ordering of P and Q pairs must stay matched element-wise.
	P := [2]bls12381.G1Affine{*msgPoint, sig}
	Q := [2]bls12381.G2Affine{*aggPubKey.G2Affine, *bls.GetG2Generator()}

	// Perform the multi-pairing check; err covers malformed inputs.
	ok, err := bls12381.PairingCheck(P[:], Q[:])
	if err != nil {
		return false
	}
	return ok
}
+
// IsProofValid verifies a Merkle branch: it hashes leafObject to a leaf
// root, folds in each sibling hash from branch according to the bits of
// index, and compares the result against attestedHeader.StateRoot.
// NOTE(review): the leaf root comes from TreeHashRoot, which hashes a
// JSON encoding — confirm callers pass leafObject in that format.
func IsProofValid(
	attestedHeader *consensus_core.Header,
	leafObject []byte, // Single byte slice of the leaf object
	branch [][]byte, // Slice of byte slices for the branch
	depth, index int, // Depth of the Merkle proof and index of the leaf
) bool {
	// A proof must supply exactly one sibling per tree level.
	if len(branch) != depth {
		return false
	}

	// Initialize the derived root as the leaf object's hash
	derivedRoot, errFetchingTreeHashRoot := TreeHashRoot(leafObject)

	if errFetchingTreeHashRoot != nil {
		fmt.Printf("Error fetching tree hash root: %v", errFetchingTreeHashRoot)
		return false
	}

	// Walk up the tree, combining with each sibling hash in turn.
	for i, hash := range branch {
		hasher := sha256.New()

		// Bit i of index says whether our node is the right (1) or
		// left (0) child at level i, which fixes the hash order.
		if (index>>i)&1 == 1 {
			// If index is odd, hash node || derivedRoot
			hasher.Write(hash)
			hasher.Write(derivedRoot)
		} else {
			// If index is even, hash derivedRoot || node
			hasher.Write(derivedRoot)
			hasher.Write(hash)
		}
		// Update derivedRoot for the next level
		derivedRoot = hasher.Sum(nil)
	}

	// The proof is valid iff the reconstructed root matches the
	// attested header's state root.
	return bytes.Equal(derivedRoot, attestedHeader.StateRoot[:])
}
+
+func CalculateForkVersion(forks *consensus_core.Forks, slot uint64) [4]byte {
+ epoch := slot / 32
+
+ switch {
+ case epoch >= forks.Deneb.Epoch:
+ return [4]byte(forks.Deneb.ForkVersion)
+ case epoch >= forks.Capella.Epoch:
+ return [4]byte(forks.Capella.ForkVersion)
+ case epoch >= forks.Bellatrix.Epoch:
+ return [4]byte(forks.Bellatrix.ForkVersion)
+ case epoch >= forks.Altair.Epoch:
+ return [4]byte(forks.Altair.ForkVersion)
+ default:
+ return [4]byte(forks.Genesis.ForkVersion)
+ }
+}
+
// ComputeForkDataRoot returns the hash-tree root of a ForkData container
// built from currentVersion and genesisValidatorRoot.
// NOTE(review): TreeHashRoot consumes the JSON encoding produced by
// ForkData.ToBytes, not an SSZ serialization — confirm this matches the
// consensus-spec compute_fork_data_root before relying on it cross-client.
// On any hashing error the zero root is returned silently.
func ComputeForkDataRoot(currentVersion [4]byte, genesisValidatorRoot consensus_core.Bytes32) consensus_core.Bytes32 {
	forkData := ForkData{
		CurrentVersion: currentVersion,
		GenesisValidatorRoot: genesisValidatorRoot,
	}

	hash, err := TreeHashRoot(forkData.ToBytes())
	if err != nil {
		// Swallow the error and return the zero root; callers cannot
		// distinguish this from a genuine all-zero root.
		return consensus_core.Bytes32{}
	}
	return consensus_core.Bytes32(hash)
}
+
+// GetParticipatingKeys retrieves the participating public keys from the committee based on the bitfield represented as a byte array.
+func GetParticipatingKeys(committee *consensus_core.SyncCommittee, bitfield [64]byte) ([]consensus_core.BLSPubKey, error) {
+ var pks []consensus_core.BLSPubKey
+ numBits := len(bitfield) * 8 // Total number of bits
+
+ if len(committee.Pubkeys) > numBits {
+ return nil, fmt.Errorf("bitfield is too short for the number of public keys")
+ }
+
+ for i := 0; i < len(bitfield); i++ {
+ byteVal := bitfield[i]
+ for bit := 0; bit < 8; bit++ {
+ if (byteVal & (1 << bit)) != 0 {
+ index := i*8 + bit
+ if index >= len(committee.Pubkeys) {
+ break
+ }
+ pks = append(pks, committee.Pubkeys[index])
+ }
+ }
+ }
+
+ return pks, nil
+}
+
// ComputeSigningRoot returns the hash-tree root of a SigningData
// container combining objectRoot with the signing domain.
// NOTE(review): TreeHashRoot hashes the JSON encoding from
// SigningData.ToBytes, not an SSZ serialization — verify against the
// consensus-spec compute_signing_root.
// On any hashing error the zero root is returned silently.
func ComputeSigningRoot(objectRoot, domain consensus_core.Bytes32) consensus_core.Bytes32 {
	signingData := SigningData{
		ObjectRoot: objectRoot,
		Domain: domain,
	}
	hash, err := TreeHashRoot(signingData.ToBytes())
	if err != nil {
		// Error is swallowed; callers see an all-zero root.
		return consensus_core.Bytes32{}
	}
	return consensus_core.Bytes32(hash)
}
+
+func ComputeDomain(domainType [4]byte, forkDataRoot consensus_core.Bytes32) consensus_core.Bytes32 {
+ data := append(domainType[:], forkDataRoot[:28]...)
+ return sha256.Sum256(data)
+}
+
// SigningData pairs an object's hash-tree root with a signing domain;
// its serialized form is hashed by ComputeSigningRoot.
type SigningData struct {
	ObjectRoot consensus_core.Bytes32 // root of the object being signed
	Domain consensus_core.Bytes32 // signing domain (see ComputeDomain)
}
+
// ForkData pairs a fork version with the genesis validators root;
// its serialized form is hashed by ComputeForkDataRoot.
type ForkData struct {
	CurrentVersion [4]byte // active fork version (see CalculateForkVersion)
	GenesisValidatorRoot consensus_core.Bytes32 // chain's genesis validators root
}
+
+func (fd *ForkData) ToBytes() []byte {
+ data, err := json.Marshal(fd)
+ if err != nil {
+ log.Println("Error marshaling ForkData:", err)
+ return nil // Or return an empty slice, based on your preference
+ }
+ return data
+}
+
+func (sd *SigningData) ToBytes() []byte {
+ data, err := json.Marshal(sd)
+ if err != nil {
+ log.Println("Error marshaling SigningData:", err)
+ return nil // Or return an empty slice, based on your preference
+ }
+ return data
+}
+
+func Bytes32ToNode(bytes consensus_core.Bytes32) []byte {
+ return []byte(bytes[:])
+}
+
+// branchToNodes converts a slice of Bytes32 to a slice of Node
+func BranchToNodes(branch []consensus_core.Bytes32) ([][]byte, error) {
+ nodes := make([][]byte, len(branch))
+ for i, b32 := range branch {
+ nodes[i] = Bytes32ToNode(b32)
+ }
+ return nodes, nil
+}