diff --git a/common/big.go b/common/big.go index 5be6b019d6..52d5cdde59 100644 --- a/common/big.go +++ b/common/big.go @@ -20,13 +20,15 @@ import "math/big" // Common big integers often used var ( - Big1 = big.NewInt(1) - Big2 = big.NewInt(2) - Big3 = big.NewInt(3) - Big0 = big.NewInt(0) - Big32 = big.NewInt(32) - Big256 = big.NewInt(256) - Big257 = big.NewInt(257) + Big0 = big.NewInt(0) + Big1 = big.NewInt(1) + Big2 = big.NewInt(2) + Big3 = big.NewInt(3) + Big4 = big.NewInt(4) + Big32 = big.NewInt(32) + Big256 = big.NewInt(256) + Big257 = big.NewInt(257) + Big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) ) func BigBitsToBits(original *big.Int) *big.Int { @@ -43,3 +45,10 @@ func BigBitsArrayToBitsArray(original []*big.Int) []*big.Int { return bitsArray } + +func MaxBigInt(x, y *big.Int) *big.Int { + if x.Cmp(y) > 0 { + return x + } + return y +} diff --git a/consensus/blake3pow/consensus.go b/consensus/blake3pow/consensus.go index 10de5f466f..580de2e0d9 100644 --- a/consensus/blake3pow/consensus.go +++ b/consensus/blake3pow/consensus.go @@ -40,6 +40,7 @@ var ( big8 = big.NewInt(8) big9 = big.NewInt(9) big10 = big.NewInt(10) + big20 = big.NewInt(20) big32 = big.NewInt(32) bigMinus99 = big.NewInt(-99) big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // 2^256 @@ -280,11 +281,35 @@ func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, head if common.Big0.Cmp(header.ParentDeltaS()) != 0 { return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), common.Big0) } + // If parent block is dom, validate the prime difficulty + if nodeCtx == common.REGION_CTX { + primeEntropyThreshold, err := blake3pow.CalcPrimeEntropyThreshold(chain, parent) + if err != nil { + return err + } + if header.PrimeEntropyThreshold(parent.Location().SubIndex()).Cmp(primeEntropyThreshold) != 0 { + return fmt.Errorf("invalid prime difficulty pd: have %v, want %v", header.PrimeEntropyThreshold(parent.Location().SubIndex()), primeEntropyThreshold) + } + } } else { parentDeltaS := blake3pow.DeltaLogS(parent) if parentDeltaS.Cmp(header.ParentDeltaS()) != 0 { return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), parentDeltaS) } + if nodeCtx == common.REGION_CTX { + // if parent is not a dom block, no adjustment to the prime or region difficulty will be made + for i := 0; i < common.NumZonesInRegion; i++ { + if header.PrimeEntropyThreshold(i).Cmp(parent.PrimeEntropyThreshold(i)) != 0 { + return fmt.Errorf("invalid prime difficulty pd: have %v, want %v at index %v", header.PrimeEntropyThreshold(i), parent.PrimeEntropyThreshold(i), i) + } + } + } + if nodeCtx == common.ZONE_CTX { + if header.PrimeEntropyThreshold(common.NodeLocation.Zone()).Cmp(parent.PrimeEntropyThreshold(common.NodeLocation.Zone())) != 0 { + return fmt.Errorf("invalid prime difficulty pd: have %v, want %v at index %v", header.PrimeEntropyThreshold(common.NodeLocation.Zone()), parent.PrimeEntropyThreshold(common.NodeLocation.Zone()), common.NodeLocation.Zone()) + } + } + } } @@ -376,6 +401,57 @@ func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, pa return x } +// CalcPrimeDifficultyThreshold calculates the difficulty that a block must meet +// to become a region block. This function needs to have a controller so that the +// liveliness of the slices can balance even if the hash rate of the slice varies. 
+// This will also cause the production of the prime blocks to naturally diverge
+// with time, reducing the uncle rate. The controller is built to adjust the
+// number of zone blocks it takes to produce a prime block. This is done based on
+// the prior number of blocks it took to reach the threshold, which is then
+// multiplied by the current difficulty to establish the threshold. The controller
+// adjusts the block threshold value and is a simple form of a bang-bang
+// controller, which is all that is needed to ensure liveliness of the slices in
+// prime over time. If the slice is not sufficiently lively, 20 zone blocks are
+// subtracted from the threshold. If it is too lively, 20 blocks are added to the threshold.
+func (blake3pow *Blake3pow) CalcPrimeEntropyThreshold(chain consensus.ChainHeaderReader, parent *types.Header) (*big.Int, error) {
+ nodeCtx := common.NodeLocation.Context()
+
+ if nodeCtx != common.REGION_CTX {
+ log.Error("Cannot CalcPrimeEntropyThreshold for", "context", nodeCtx)
+ return nil, errors.New("cannot CalcPrimeEntropyThreshold for non-region context")
+ }
+
+ if parent.Hash() == chain.Config().GenesisHash {
+ return parent.PrimeEntropyThreshold(parent.Location().SubIndex()), nil
+ }
+
+ // Get the primeTerminus
+ termini := chain.GetTerminiByHash(parent.ParentHash())
+ if termini == nil {
+ return nil, errors.New("termini not found in CalcPrimeEntropyThreshold")
+ }
+ primeTerminusHeader := chain.GetHeaderByHash(termini.PrimeTerminiAtIndex(parent.Location().SubIndex()))
+
+ log.Info("CalcPrimeEntropyThreshold", "primeTerminusHeader:", primeTerminusHeader.NumberArray(), "Hash", primeTerminusHeader.Hash())
+ deltaNumber := new(big.Int).Sub(parent.Number(), primeTerminusHeader.Number())
+ log.Info("CalcPrimeEntropyThreshold", "deltaNumber:", deltaNumber)
+ target := new(big.Int).Mul(big.NewInt(common.NumRegionsInPrime), params.TimeFactor)
+ target = new(big.Int).Mul(big.NewInt(common.NumZonesInRegion), target)
+ log.Info("CalcPrimeEntropyThreshold", "target:", target)
+
+ var newThreshold *big.Int
+ if target.Cmp(deltaNumber) > 0 {
+ newThreshold = new(big.Int).Add(parent.PrimeEntropyThreshold(parent.Location().Zone()), big20)
+ } else {
+ newThreshold = new(big.Int).Sub(parent.PrimeEntropyThreshold(parent.Location().Zone()), big20)
+ }
+ newMinThreshold := new(big.Int).Div(target, big2)
+ newThreshold = new(big.Int).Set(common.MaxBigInt(newThreshold, newMinThreshold))
+ log.Info("CalcPrimeEntropyThreshold", "newThreshold:", newThreshold)
+
+ return newThreshold, nil
+}
+
 func (blake3pow *Blake3pow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.Header) bool {
 _, order, err := blake3pow.CalcOrder(header)
 if err != nil {
diff --git a/consensus/blake3pow/poem.go b/consensus/blake3pow/poem.go
index 46203a819c..f2688c226e 100644
--- a/consensus/blake3pow/poem.go
+++ b/consensus/blake3pow/poem.go
@@ -23,41 +23,31 @@ func (blake3pow *Blake3pow) CalcOrder(header *types.Header) (*big.Int, int, erro
 // Get entropy reduction of this header
 intrinsicS := blake3pow.IntrinsicLogS(header.Hash())
-
- // This is the updated the threshold calculation based on the zone difficulty threshold
- target := new(big.Int).Div(big2e256, header.Difficulty()).Bytes()
+ target := new(big.Int).Div(common.Big2e256, header.Difficulty()).Bytes()
 zoneThresholdS := blake3pow.IntrinsicLogS(common.BytesToHash(target))
- timeFactorHierarchyDepthMultiple := new(big.Int).Mul(params.TimeFactor, big.NewInt(common.HierarchyDepth))
-
- // Prime case
- primeEntropyThreshold := 
new(big.Int).Mul(timeFactorHierarchyDepthMultiple, timeFactorHierarchyDepthMultiple) - primeEntropyThreshold = new(big.Int).Mul(primeEntropyThreshold, zoneThresholdS) - primeBlockThreshold := new(big.Int).Quo(primeEntropyThreshold, big.NewInt(2)) - primeEntropyThreshold = new(big.Int).Sub(primeEntropyThreshold, primeBlockThreshold) - primeBlockEntropyThresholdAdder, _ := mathutil.BinaryLog(primeBlockThreshold, 8) - primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, big.NewInt(int64(primeBlockEntropyThresholdAdder))) + // PRIME + // Compute the total accumulated entropy since the last prime block + totalDeltaSPrime := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) + totalDeltaSPrime.Add(totalDeltaSPrime, intrinsicS) - totalDeltaS := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) - totalDeltaS.Add(totalDeltaS, intrinsicS) - if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaS.Cmp(primeEntropyThreshold) > 0 { + // PrimeEntropyThreshold number of zone blocks times the intrinsic logs of + // the given header determines the prime block + primeEntropyThreshold := new(big.Int).Mul(zoneThresholdS, header.PrimeEntropyThreshold(header.Location().Zone())) + if totalDeltaSPrime.Cmp(primeEntropyThreshold) > 0 { return intrinsicS, common.PRIME_CTX, nil } - // Region case - regionEntropyThreshold := new(big.Int).Mul(timeFactorHierarchyDepthMultiple, zoneThresholdS) - regionBlockThreshold := new(big.Int).Quo(regionEntropyThreshold, big.NewInt(2)) - regionEntropyThreshold = new(big.Int).Sub(regionEntropyThreshold, regionBlockThreshold) - - regionBlockEntropyThresholdAdder, _ := mathutil.BinaryLog(regionBlockThreshold, 8) - regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, big.NewInt(int64(regionBlockEntropyThresholdAdder))) - - totalDeltaS = new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS) - if intrinsicS.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaS.Cmp(regionEntropyThreshold) > 0 { + // REGION + // Compute the total accumulated entropy since the last region block + totalDeltaSRegion := new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS) + regionEntropyThreshold := new(big.Int).Mul(zoneThresholdS, params.TimeFactor) + regionEntropyThreshold = new(big.Int).Mul(regionEntropyThreshold, big.NewInt(common.NumZonesInRegion)) + if totalDeltaSRegion.Cmp(regionEntropyThreshold) > 0 { return intrinsicS, common.REGION_CTX, nil } - // Zone case + // ZONE return intrinsicS, common.ZONE_CTX, nil } diff --git a/consensus/consensus.go b/consensus/consensus.go index adb731c2b2..9eed397a46 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -44,6 +44,9 @@ type ChainHeaderReader interface { // GetHeaderByHash retrieves a block header from the database by its hash. GetHeaderByHash(hash common.Hash) *types.Header + + // GetTerminiByHash retrieves the termini for a given header hash + GetTerminiByHash(hash common.Hash) *types.Termini } // ChainReader defines a small collection of methods needed to access the local @@ -128,6 +131,9 @@ type Engine interface { // that a new block should have. 
CalcDifficulty(chain ChainHeaderReader, parent *types.Header) *big.Int
+ // CalcPrimeEntropyThreshold is the threshold adjustment algorithm for prime blocks per slice
+ CalcPrimeEntropyThreshold(chain ChainHeaderReader, parent *types.Header) (*big.Int, error)
+
 // IsDomCoincident returns true if this block satisfies the difficulty order
 // of a dominant chain. If this node does not have a dominant chain (i.e.
 // if this is a prime node), then the function will always return false.
diff --git a/consensus/progpow/consensus.go b/consensus/progpow/consensus.go
index 3c3d95c118..04d9b4aa4b 100644
--- a/consensus/progpow/consensus.go
+++ b/consensus/progpow/consensus.go
@@ -41,7 +41,9 @@ var (
 big8 = big.NewInt(8)
 big9 = big.NewInt(9)
 big10 = big.NewInt(10)
+ big20 = big.NewInt(20)
 big32 = big.NewInt(32)
+ big100 = big.NewInt(100)
 bigMinus99 = big.NewInt(-99)
 big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // 2^256
 )
@@ -280,11 +282,36 @@ func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header,
 if common.Big0.Cmp(header.ParentDeltaS()) != 0 {
 return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), common.Big0)
 }
+ // If parent block is dom, validate the prime difficulty
+ if nodeCtx == common.REGION_CTX {
+ primeEntropyThreshold, err := progpow.CalcPrimeEntropyThreshold(chain, parent)
+ if err != nil {
+ return err
+ }
+ if header.PrimeEntropyThreshold(parent.Location().SubIndex()).Cmp(primeEntropyThreshold) != 0 {
+ return fmt.Errorf("invalid prime difficulty pd: have %v, want %v", header.PrimeEntropyThreshold(parent.Location().SubIndex()), primeEntropyThreshold)
+ }
+ }
 } else {
 parentDeltaS := progpow.DeltaLogS(parent)
 if parentDeltaS.Cmp(header.ParentDeltaS()) != 0 {
 return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), parentDeltaS)
 }
+
+ if nodeCtx == common.REGION_CTX {
+ // if parent is not a dom block, no adjustment to the prime or region difficulty will be made
+ for i := 0; i < common.NumZonesInRegion; i++ {
+ if header.PrimeEntropyThreshold(i).Cmp(parent.PrimeEntropyThreshold(i)) != 0 {
+ return fmt.Errorf("invalid prime difficulty pd: have %v, want %v at index %v", header.PrimeEntropyThreshold(i), parent.PrimeEntropyThreshold(i), i)
+ }
+ }
+ }
+ if nodeCtx == common.ZONE_CTX {
+ if header.PrimeEntropyThreshold(common.NodeLocation.Zone()).Cmp(parent.PrimeEntropyThreshold(common.NodeLocation.Zone())) != 0 {
+ return fmt.Errorf("invalid prime difficulty pd: have %v, want %v at index %v", header.PrimeEntropyThreshold(common.NodeLocation.Zone()), parent.PrimeEntropyThreshold(common.NodeLocation.Zone()), common.NodeLocation.Zone())
+ }
+ }
+ }
 }
 }
 if nodeCtx == common.ZONE_CTX {
@@ -320,6 +347,57 @@ func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header,
 return nil
 }
+// CalcPrimeDifficultyThreshold calculates the difficulty that a block must meet
+// to become a region block. This function needs to have a controller so that the
+// liveliness of the slices can balance even if the hash rate of the slice varies.
+// This will also cause the production of the prime blocks to naturally diverge
+// with time, reducing the uncle rate. The controller is built to adjust the
+// number of zone blocks it takes to produce a prime block. This is done based on
+// the prior number of blocks it took to reach the threshold, which is then
+// multiplied by the current difficulty to establish the threshold. The controller
+// adjusts the block threshold value and is a simple form of a bang-bang
+// controller, which is all that is needed to ensure liveliness of the slices in
+// prime over time. If the slice is not sufficiently lively, 20 zone blocks are
+// subtracted from the threshold. If it is too lively, 20 blocks are added to the threshold.
+func (progpow *Progpow) CalcPrimeEntropyThreshold(chain consensus.ChainHeaderReader, parent *types.Header) (*big.Int, error) {
+ nodeCtx := common.NodeLocation.Context()
+
+ if nodeCtx != common.REGION_CTX {
+ log.Error("Cannot CalcPrimeEntropyThreshold for", "context", nodeCtx)
+ return nil, errors.New("cannot CalcPrimeEntropyThreshold for non-region context")
+ }
+
+ if parent.Hash() == chain.Config().GenesisHash {
+ return parent.PrimeEntropyThreshold(parent.Location().SubIndex()), nil
+ }
+
+ // Get the primeTerminus
+ termini := chain.GetTerminiByHash(parent.ParentHash())
+ if termini == nil {
+ return nil, errors.New("termini not found in CalcPrimeEntropyThreshold")
+ }
+ primeTerminusHeader := chain.GetHeaderByHash(termini.PrimeTerminiAtIndex(parent.Location().SubIndex()))
+
+ log.Info("CalcPrimeEntropyThreshold", "primeTerminusHeader:", primeTerminusHeader.NumberArray(), "Hash", primeTerminusHeader.Hash())
+ deltaNumber := new(big.Int).Sub(parent.Number(), primeTerminusHeader.Number())
+ log.Info("CalcPrimeEntropyThreshold", "deltaNumber:", deltaNumber)
+ target := new(big.Int).Mul(big.NewInt(common.NumRegionsInPrime), params.TimeFactor)
+ target = new(big.Int).Mul(big.NewInt(common.NumZonesInRegion), target)
+ log.Info("CalcPrimeEntropyThreshold", "target:", target)
+
+ var newThreshold *big.Int
+ if target.Cmp(deltaNumber) > 0 {
+ newThreshold = new(big.Int).Add(parent.PrimeEntropyThreshold(parent.Location().Zone()), big20)
+ } else {
+ newThreshold = new(big.Int).Sub(parent.PrimeEntropyThreshold(parent.Location().Zone()), big20)
+ }
+ newMinThreshold := new(big.Int).Div(target, big2)
+ newThreshold = new(big.Int).Set(common.MaxBigInt(newThreshold, newMinThreshold))
+ log.Info("CalcPrimeEntropyThreshold", "newThreshold:", newThreshold)
+
+ return newThreshold, nil
+}
+
 // CalcDifficulty is the difficulty adjustment algorithm. It returns
 // the difficulty that a new block should have when created at time
 // given the parent block's time and difficulty. 
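For reference, a minimal, self-contained sketch of the bang-bang adjustment that CalcPrimeEntropyThreshold implements identically in blake3pow and progpow above. The function name, the standalone package, and the numbers in main are illustrative stand-ins (the real code reads the parent threshold and the distance to the prime terminus from the chain); only the fixed +/-20 step and the floor at half the target are taken from the diff.

package main

import (
	"fmt"
	"math/big"
)

var step = big.NewInt(20) // mirrors big20 in the diff

// nextPrimeEntropyThreshold applies the bang-bang rule: if the last prime block
// arrived after fewer zone blocks than targeted (the slice is too lively), the
// threshold is raised by 20 zone blocks; otherwise it is lowered by 20, but it
// is never allowed to fall below half of the target.
func nextPrimeEntropyThreshold(parentThreshold, deltaNumber, target *big.Int) *big.Int {
	next := new(big.Int)
	if target.Cmp(deltaNumber) > 0 {
		next.Add(parentThreshold, step)
	} else {
		next.Sub(parentThreshold, step)
	}
	minThreshold := new(big.Int).Div(target, big.NewInt(2))
	if next.Cmp(minThreshold) < 0 {
		return minThreshold // same effect as common.MaxBigInt(next, minThreshold)
	}
	return next
}

func main() {
	// Illustrative numbers only; target stands in for NumRegionsInPrime*NumZonesInRegion*TimeFactor.
	target := big.NewInt(540)
	fmt.Println(nextPrimeEntropyThreshold(big.NewInt(600), big.NewInt(400), target)) // 620: prime came early, raise
	fmt.Println(nextPrimeEntropyThreshold(big.NewInt(600), big.NewInt(900), target)) // 580: prime came late, lower
}

Because the controller only ever moves the threshold by a fixed step, it needs no model of the slice's hash rate; as the comments above note, that simple form is enough to keep prime block production near the target over time.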
diff --git a/consensus/progpow/poem.go b/consensus/progpow/poem.go index 0cafffff79..7ab1cdbea9 100644 --- a/consensus/progpow/poem.go +++ b/consensus/progpow/poem.go @@ -23,40 +23,30 @@ func (progpow *Progpow) CalcOrder(header *types.Header) (*big.Int, int, error) { // Get entropy reduction of this header intrinsicS := progpow.IntrinsicLogS(powHash) - - // This is the updated the threshold calculation based on the zone difficulty threshold - target := new(big.Int).Div(big2e256, header.Difficulty()).Bytes() + target := new(big.Int).Div(common.Big2e256, header.Difficulty()).Bytes() zoneThresholdS := progpow.IntrinsicLogS(common.BytesToHash(target)) - timeFactorHierarchyDepthMultiple := new(big.Int).Mul(params.TimeFactor, big.NewInt(common.HierarchyDepth)) - - // Prime case - primeEntropyThreshold := new(big.Int).Mul(timeFactorHierarchyDepthMultiple, timeFactorHierarchyDepthMultiple) - primeEntropyThreshold = new(big.Int).Mul(primeEntropyThreshold, zoneThresholdS) - primeBlockThreshold := new(big.Int).Quo(primeEntropyThreshold, big.NewInt(2)) - primeEntropyThreshold = new(big.Int).Sub(primeEntropyThreshold, primeBlockThreshold) - primeBlockEntropyThresholdAdder, _ := mathutil.BinaryLog(primeBlockThreshold, 8) - primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, big.NewInt(int64(primeBlockEntropyThresholdAdder))) + // PRIME + // Compute the total accumulated entropy since the last prime block + totalDeltaSPrime := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) + totalDeltaSPrime.Add(totalDeltaSPrime, intrinsicS) - totalDeltaS := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) - totalDeltaS.Add(totalDeltaS, intrinsicS) - if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaS.Cmp(primeEntropyThreshold) > 0 { + // PrimeEntropyThreshold number of zone blocks times the intrinsic logs of the given header determines the prime block + primeEntropyThreshold := new(big.Int).Mul(zoneThresholdS, header.PrimeEntropyThreshold(header.Location().Zone())) + if totalDeltaSPrime.Cmp(primeEntropyThreshold) > 0 { return intrinsicS, common.PRIME_CTX, nil } - // Region case - regionEntropyThreshold := new(big.Int).Mul(timeFactorHierarchyDepthMultiple, zoneThresholdS) - regionBlockThreshold := new(big.Int).Quo(regionEntropyThreshold, big.NewInt(2)) - regionEntropyThreshold = new(big.Int).Sub(regionEntropyThreshold, regionBlockThreshold) - - regionBlockEntropyThresholdAdder, _ := mathutil.BinaryLog(regionBlockThreshold, 8) - regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, big.NewInt(int64(regionBlockEntropyThresholdAdder))) - - totalDeltaS = new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS) - if intrinsicS.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaS.Cmp(regionEntropyThreshold) > 0 { + // REGION + // Compute the total accumulated entropy since the last region block + totalDeltaSRegion := new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS) + regionEntropyThreshold := new(big.Int).Mul(zoneThresholdS, params.TimeFactor) + regionEntropyThreshold = new(big.Int).Mul(regionEntropyThreshold, big.NewInt(common.NumZonesInRegion)) + if totalDeltaSRegion.Cmp(regionEntropyThreshold) > 0 { return intrinsicS, common.REGION_CTX, nil } + // ZONE return intrinsicS, common.ZONE_CTX, nil } diff --git a/core/chain_makers.go b/core/chain_makers.go index 5429487d9e..868746985a 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -310,3 +310,4 @@ func (cr 
*fakeChainReader) GetHeaderByNumber(number uint64) *types.Header func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil } func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } +func (cr *fakeChainReader) GetTerminiByHash(hash common.Hash) *types.Termini { return nil } diff --git a/core/core.go b/core/core.go index 8f2baa58ef..4531c7e1bb 100644 --- a/core/core.go +++ b/core/core.go @@ -335,7 +335,7 @@ func (c *Core) SubscribePendingEtxsRollup(ch chan<- types.PendingEtxsRollup) eve return c.sl.SubscribePendingEtxsRollup(ch) } -func (c *Core) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes []common.Hash) error { +func (c *Core) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error { return c.sl.GenerateRecoveryPendingHeader(pendingHeader, checkpointHashes) } @@ -498,7 +498,7 @@ func (c *Core) GetBodyRLP(hash common.Hash) rlp.RawValue { } // GetTerminiByHash retrieves the termini stored for a given header hash -func (c *Core) GetTerminiByHash(hash common.Hash) []common.Hash { +func (c *Core) GetTerminiByHash(hash common.Hash) *types.Termini { return c.sl.hc.GetTerminiByHash(hash) } diff --git a/core/genesis.go b/core/genesis.go index 593c3634c0..7663996a92 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -37,12 +37,19 @@ import ( "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/trie" + "modernc.org/mathutil" ) //go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go //go:generate gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go -var errGenesisNoConfig = errors.New("genesis has no chain configuration") +var ( + errGenesisNoConfig = errors.New("genesis has no chain configuration") +) + +const ( + mantBits = 64 +) // Genesis specifies the header fields, state of a genesis block. It also defines hard // fork switch-over blocks through the chain configuration. @@ -278,9 +285,25 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { head.SetParentHash(common.Hash{}, i) } + initPrimeThreshold := new(big.Int).Mul(params.TimeFactor, big.NewInt(common.NumRegionsInPrime)) + initPrimeThreshold = new(big.Int).Mul(initPrimeThreshold, big.NewInt(common.NumZonesInRegion)) + initPrimeThreshold = new(big.Int).Mul(initPrimeThreshold, params.TimeFactor) + for i := 0; i < common.NumZonesInRegion; i++ { + head.SetPrimeEntropyThreshold(initPrimeThreshold, i) + } return types.NewBlock(head, nil, nil, nil, nil, nil, trie.NewStackTrie(nil)) } +// IntrinsicLogS returns the logarithm of the intrinsic entropy reduction of a PoW hash +func (g *Genesis) IntrinsicLogS(powHash common.Hash) *big.Int { + x := new(big.Int).SetBytes(powHash.Bytes()) + d := new(big.Int).Div(common.Big2e256, x) + c, m := mathutil.BinaryLog(d, mantBits) + bigBits := new(big.Int).Mul(big.NewInt(int64(c)), new(big.Int).Exp(big.NewInt(2), big.NewInt(mantBits), nil)) + bigBits = new(big.Int).Add(bigBits, m) + return bigBits +} + // Commit writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. 
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { @@ -292,7 +315,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { if config == nil { config = params.AllProgpowProtocolChanges } - rawdb.WriteTermini(db, block.Hash(), nil) + rawdb.WriteTermini(db, block.Hash(), types.EmptyTermini()) rawdb.WriteBlock(db, block) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) diff --git a/core/headerchain.go b/core/headerchain.go index e2ac88569d..cedea6d0ee 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -278,7 +278,7 @@ func (hc *HeaderChain) collectInclusiveEtxRollup(b *types.Block) (types.Transact // Append func (hc *HeaderChain) Append(batch ethdb.Batch, block *types.Block, newInboundEtxs types.Transactions) error { nodeCtx := common.NodeLocation.Context() - log.Debug("HeaderChain Append:", "Block information: Hash:", block.Hash(), "block header hash:", block.Header().Hash(), "Number:", block.NumberU64(), "Location:", block.Header().Location, "Parent:", block.ParentHash()) + log.Debug("HeaderChain Append:", "Block information: Hash:", block.Hash(), "block header hash:", block.Header().Hash(), "Number:", block.NumberU64(), "Location:", block.Header().Location(), "Parent:", block.ParentHash()) err := hc.engine.VerifyHeader(hc, block.Header()) if err != nil { @@ -498,7 +498,7 @@ func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { return number } -func (hc *HeaderChain) GetTerminiByHash(hash common.Hash) []common.Hash { +func (hc *HeaderChain) GetTerminiByHash(hash common.Hash) *types.Termini { termini := rawdb.ReadTermini(hc.headerDb, hash) return termini } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 493e42f669..e6959a391f 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -506,22 +506,21 @@ func DeleteAllPbBodyKeys(db ethdb.KeyValueWriter) { } // ReadHeadsHashes retreive's the heads hashes of the blockchain. -func ReadTermini(db ethdb.Reader, hash common.Hash) []common.Hash { +func ReadTermini(db ethdb.Reader, hash common.Hash) *types.Termini { key := terminiKey(hash) data, _ := db.Get(key) if len(data) == 0 { return nil } - hashes := []common.Hash{} - if err := rlp.DecodeBytes(data, &hashes); err != nil { + var termini types.Termini + if err := rlp.DecodeBytes(data, &termini); err != nil { return nil } - return hashes + return &termini } -// WriteHeadsHashes writes the heads hashes of the blockchain. -func WriteTermini(db ethdb.KeyValueWriter, index common.Hash, hashes []common.Hash) { - log.Debug("WriteTermini:", "hashes:", hashes, "index:", index) +// WriteTermini writes the heads hashes of the blockchain. +func WriteTermini(db ethdb.KeyValueWriter, index common.Hash, hashes types.Termini) { key := terminiKey(index) data, err := rlp.EncodeToBytes(hashes) if err != nil { @@ -542,23 +541,24 @@ func DeleteTermini(db ethdb.KeyValueWriter, hash common.Hash) { } // ReadPendingHeader retreive's the pending header stored in hash. 
-func ReadPendingHeader(db ethdb.Reader, hash common.Hash) *types.Header { +func ReadPendingHeader(db ethdb.Reader, hash common.Hash) *types.PendingHeader { key := pendingHeaderKey(hash) data, _ := db.Get(key) if len(data) == 0 { + log.Error("Pending Header is nil", "Key", key) return nil } - header := new(types.Header) - if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + pendingHeader := new(types.PendingHeader) + if err := rlp.Decode(bytes.NewReader(data), pendingHeader); err != nil { log.Error("Invalid pendingHeader RLP") return nil } - return header + return pendingHeader } // WritePendingHeader writes the pending header of the terminus hash. -func WritePendingHeader(db ethdb.KeyValueWriter, hash common.Hash, pendingHeader *types.Header) { +func WritePendingHeader(db ethdb.KeyValueWriter, hash common.Hash, pendingHeader types.PendingHeader) { key := pendingHeaderKey(hash) // Write the encoded pending header @@ -580,42 +580,6 @@ func DeletePendingHeader(db ethdb.KeyValueWriter, hash common.Hash) { } } -// ReadPhCacheTermini retreive's the pending header termini stored in hash. -func ReadPhCacheTermini(db ethdb.Reader, hash common.Hash) []common.Hash { - key := phBodyTerminiKey(hash) - data, _ := db.Get(key) - if len(data) == 0 { - return nil - } - termini := []common.Hash{} - if err := rlp.Decode(bytes.NewReader(data), &termini); err != nil { - log.Error("Invalid pendingHeader RLP") - return nil - } - return termini -} - -// WritePhCacheTermini writes the pending header termini of the terminus hash. -func WritePhCacheTermini(db ethdb.KeyValueWriter, hash common.Hash, termini []common.Hash) { - key := phBodyTerminiKey(hash) - // Write the encoded pending header - data, err := rlp.EncodeToBytes(termini) - if err != nil { - log.Fatal("Failed to RLP encode pending header", "err", err) - } - if err := db.Put(key, data); err != nil { - log.Fatal("Failed to store header", "err", err) - } -} - -// DeletePhCacheTermini deletes the pending header termini stored for the header hash. -func DeletePhCacheTermini(db ethdb.KeyValueWriter, hash common.Hash) { - key := phBodyTerminiKey(hash) - if err := db.Delete(key); err != nil { - log.Fatal("Failed to delete slice pending header ", "err", err) - } -} - // ReadPhCache retreive's the heads hashes of the blockchain. func ReadPhCache(db ethdb.Reader) map[common.Hash]types.PendingHeader { data, _ := db.Get(phCacheKey) @@ -631,10 +595,10 @@ func ReadPhCache(db ethdb.Reader) map[common.Hash]types.PendingHeader { phCache := make(map[common.Hash]types.PendingHeader) // Read the pending header and phBody. 
for _, hash := range hashes { - header := ReadPendingHeader(db, hash) - termini := ReadPhCacheTermini(db, hash) - pendingHeader := types.PendingHeader{Header: header, Termini: termini} - phCache[hash] = pendingHeader + pendingHeader := ReadPendingHeader(db, hash) + if pendingHeader != nil { + phCache[hash] = *pendingHeader + } } return phCache } @@ -644,8 +608,7 @@ func WritePhCache(db ethdb.KeyValueWriter, phCache map[common.Hash]types.Pending var hashes []common.Hash for hash, pendingHeader := range phCache { hashes = append(hashes, hash) - WritePendingHeader(db, hash, pendingHeader.Header) - WritePhCacheTermini(db, hash, pendingHeader.Termini) + WritePendingHeader(db, hash, pendingHeader) } data, err := rlp.EncodeToBytes(hashes) diff --git a/core/slice.go b/core/slice.go index b91dc1d5a8..e65667ced3 100644 --- a/core/slice.go +++ b/core/slice.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "math/rand" "sync" "time" @@ -203,6 +204,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do if err != nil { return nil, false, err } + time7 := common.PrettyDuration(time.Since(start)) time8 := common.PrettyDuration(time.Since(start)) var subPendingEtxs types.Transactions @@ -214,7 +216,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do if nodeCtx != common.ZONE_CTX { // How to get the sub pending etxs if not running the full node?. if sl.subClients[location.SubIndex()] != nil { - subPendingEtxs, subReorg, err = sl.subClients[location.SubIndex()].Append(context.Background(), block.Header(), pendingHeaderWithTermini.Header, domTerminus, true, newInboundEtxs) + subPendingEtxs, subReorg, err = sl.subClients[location.SubIndex()].Append(context.Background(), block.Header(), pendingHeaderWithTermini.Header(), domTerminus, true, newInboundEtxs) if err != nil { return nil, false, err } @@ -245,13 +247,13 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do time11 := common.PrettyDuration(appendFinished) bestPh, exist := sl.readPhCache(sl.bestPhKey) if !exist { - sl.bestPhKey = pendingHeaderWithTermini.Termini[c_terminusIndex] + sl.bestPhKey = pendingHeaderWithTermini.Termini().DomTerminus() sl.writePhCache(block.Hash(), pendingHeaderWithTermini) bestPh = pendingHeaderWithTermini log.Error("BestPh Key does not exist for", "key", sl.bestPhKey) } - oldBestPhEntropy := sl.engine.TotalLogPhS(bestPh.Header) + oldBestPhEntropy := sl.engine.TotalLogPhS(bestPh.Header()) sl.updatePhCache(pendingHeaderWithTermini, true, nil) @@ -290,12 +292,12 @@ func (sl *Slice) relayPh(block *types.Block, appendTime *time.Duration, reorg bo // Send an empty header to miner bestPh, exists := sl.readPhCache(sl.bestPhKey) if exists { - bestPh.Header.SetLocation(common.NodeLocation) - sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header) + bestPh.Header().SetLocation(common.NodeLocation) + sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) return } } else if !domOrigin { - for i := range sl.subClients { + for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { sl.subClients[i].SubRelayPendingHeader(context.Background(), pendingHeaderWithTermini, location) } @@ -303,6 +305,16 @@ func (sl *Slice) relayPh(block *types.Block, appendTime *time.Duration, reorg bo } } +func (sl *Slice) randomRelayArray() [3]int { + rand.Seed(time.Now().UnixNano()) + nums := [3]int{0, 1, 2} + for i := len(nums) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + nums[i], nums[j] = nums[j], nums[i] + } + return nums +} + // 
asyncPendingHeaderLoop waits for the pendingheader updates from the worker and updates the phCache func (sl *Slice) asyncPendingHeaderLoop() { @@ -317,8 +329,8 @@ func (sl *Slice) asyncPendingHeaderLoop() { bestPh, exists := sl.readPhCache(sl.bestPhKey) if exists { - bestPh.Header.SetLocation(common.NodeLocation) - sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header) + bestPh.Header().SetLocation(common.NodeLocation) + sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) } case <-sl.asyncPhSub.Err(): return @@ -345,7 +357,7 @@ func (sl *Slice) writePhCache(hash common.Hash, pendingHeader types.PendingHeade } // Generate a slice pending header -func (sl *Slice) generateSlicePendingHeader(block *types.Block, newTermini []common.Hash, domPendingHeader *types.Header, domOrigin bool, fill bool) (types.PendingHeader, error) { +func (sl *Slice) generateSlicePendingHeader(block *types.Block, newTermini types.Termini, domPendingHeader *types.Header, domOrigin bool, fill bool) (types.PendingHeader, error) { // Upate the local pending header localPendingHeader, err := sl.miner.worker.GeneratePendingHeader(block, fill) if err != nil { @@ -353,8 +365,8 @@ func (sl *Slice) generateSlicePendingHeader(block *types.Block, newTermini []com } // Combine subordinates pending header with local pending header - pendingHeaderWithTermini := sl.computePendingHeader(types.PendingHeader{Header: localPendingHeader, Termini: newTermini}, domPendingHeader, domOrigin) - pendingHeaderWithTermini.Header.SetLocation(block.Header().Location()) + pendingHeaderWithTermini := sl.computePendingHeader(types.NewPendingHeader(localPendingHeader, newTermini), domPendingHeader, domOrigin) + pendingHeaderWithTermini.Header().SetLocation(block.Header().Location()) return pendingHeaderWithTermini, nil } @@ -418,39 +430,38 @@ func (sl *Slice) CollectNewlyConfirmedEtxs(block *types.Block, location common.L } // PCRC previous coincidence reference check makes sure there are not any cyclic references in the graph and calculates new termini and the block terminus -func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus common.Hash, domOrigin bool) (common.Hash, []common.Hash, error) { +func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus common.Hash, domOrigin bool) (common.Hash, types.Termini, error) { nodeCtx := common.NodeLocation.Context() location := header.Location() - log.Debug("PCRC:", "Parent Hash:", header.ParentHash(), "Number", header.Number, "Location:", header.Location()) + log.Debug("PCRC:", "Parent Hash:", header.ParentHash(), "Number", header.Number(), "Location:", header.Location()) termini := sl.hc.GetTerminiByHash(header.ParentHash()) - if len(termini) != 4 { - return common.Hash{}, []common.Hash{}, ErrSubNotSyncedToDom - } - - newTermini := make([]common.Hash, len(termini)) - for i, terminus := range termini { - newTermini[i] = terminus + if !termini.IsValid() { + return common.Hash{}, types.EmptyTermini(), errors.New("termini of parent is nil or invalid") } + newTermini := types.CopyTermini(*termini) // Set the subtermini if nodeCtx != common.ZONE_CTX { - newTermini[location.SubIndex()] = header.Hash() + newTermini.SetSubTerminiAtIndex(header.Hash(), location.SubIndex()) } // Set the terminus if nodeCtx == common.PRIME_CTX || domOrigin { - newTermini[c_terminusIndex] = header.Hash() - } else { - newTermini[c_terminusIndex] = termini[c_terminusIndex] + newTermini.SetDomTerminus(header.Hash()) + } + + // Set the prime termini + if nodeCtx == common.REGION_CTX && domOrigin { + 
newTermini.SetPrimeTerminiAtIndex(header.Hash(), location.SubIndex()) } // Check for a graph cyclic reference if domOrigin { - if termini[c_terminusIndex] != domTerminus { - log.Warn("Cyclic Block:", "block number", header.NumberArray(), "hash", header.Hash(), "terminus", domTerminus, "termini", termini) - return common.Hash{}, []common.Hash{}, errors.New("termini do not match, block rejected due to cyclic reference") + if termini.DomTerminus() != domTerminus { + log.Warn("Cyclic Block:", "block number", header.NumberArray(), "hash", header.Hash(), "terminus", domTerminus, "termini", termini.DomTerminus()) + return common.Hash{}, types.EmptyTermini(), errors.New("termini do not match, block rejected due to cyclic reference") } } @@ -461,7 +472,7 @@ func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus commo return common.Hash{}, newTermini, nil } - return termini[location.SubIndex()], newTermini, nil + return termini.SubTerminiAtIndex(location.SubIndex()), newTermini, nil } // POEM compares externS to the currentHead S and returns true if externS is greater @@ -474,7 +485,7 @@ func (sl *Slice) poem(externS *big.Int, currentS *big.Int) bool { // GetPendingHeader is used by the miner to request the current pending header func (sl *Slice) GetPendingHeader() (*types.Header, error) { if ph, exists := sl.readPhCache(sl.bestPhKey); exists { - return ph.Header, nil + return ph.Header(), nil } else { return nil, errors.New("empty pending header") } @@ -517,9 +528,10 @@ func (sl *Slice) SubRelayPendingHeader(pendingHeader types.PendingHeader, locati return } } - for i := range sl.subClients { + + for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { - if ph, exists := sl.readPhCache(pendingHeader.Termini[common.NodeLocation.Region()]); exists { + if ph, exists := sl.readPhCache(pendingHeader.Termini().SubTerminiAtIndex(common.NodeLocation.Region())); exists { sl.subClients[i].SubRelayPendingHeader(context.Background(), ph, location) } } @@ -535,8 +547,8 @@ func (sl *Slice) SubRelayPendingHeader(pendingHeader types.PendingHeader, locati } bestPh, exists := sl.readPhCache(sl.bestPhKey) if exists { - bestPh.Header.SetLocation(common.NodeLocation) - sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header) + bestPh.Header().SetLocation(common.NodeLocation) + sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) } } } @@ -547,47 +559,55 @@ func (sl *Slice) computePendingHeader(localPendingHeaderWithTermini types.Pendin nodeCtx := common.NodeLocation.Context() var cachedPendingHeaderWithTermini types.PendingHeader - hash := localPendingHeaderWithTermini.Termini[c_terminusIndex] + hash := localPendingHeaderWithTermini.Termini().DomTerminus() cachedPendingHeaderWithTermini, exists := sl.readPhCache(hash) - log.Debug("computePendingHeader:", "hash:", hash, "pendingHeader:", cachedPendingHeaderWithTermini, "termini:", cachedPendingHeaderWithTermini.Termini) var newPh *types.Header + log.Info("computePendingHeader:", "primeEntropyThreshold:", localPendingHeaderWithTermini.Header().PrimeEntropyThresholdArray()) if exists { - newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header, cachedPendingHeaderWithTermini.Header, nodeCtx, true) - return types.PendingHeader{Header: types.CopyHeader(newPh), Termini: localPendingHeaderWithTermini.Termini} + log.Info("computePendingHeader:", "primeEntropyThreshold:", cachedPendingHeaderWithTermini.Header().PrimeEntropyThresholdArray()) + + newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), 
cachedPendingHeaderWithTermini.Header(), nodeCtx, true) + + log.Info("computePendingHeader:", "primeEntropyThreshold:", newPh.PrimeEntropyThresholdArray()) + return types.NewPendingHeader(newPh, localPendingHeaderWithTermini.Termini()) } else { + log.Info("computePendingHeader:", "primeEntropyThreshold:", domPendingHeader.PrimeEntropyThresholdArray()) + if domOrigin { - newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header, domPendingHeader, nodeCtx, true) - return types.PendingHeader{Header: types.CopyHeader(newPh), Termini: localPendingHeaderWithTermini.Termini} + newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), domPendingHeader, nodeCtx, true) + log.Info("computePendingHeader:", "primeEntropyThreshold:", newPh.PrimeEntropyThresholdArray()) + return types.NewPendingHeader(newPh, localPendingHeaderWithTermini.Termini()) } return localPendingHeaderWithTermini } + } // updatePhCacheFromDom combines the recieved pending header with the pending header stored locally at a given terminus for specified context func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, terminiIndex int, indices []int) error { - hash := pendingHeader.Termini[terminiIndex] + hash := pendingHeader.Termini().SubTerminiAtIndex(terminiIndex) localPendingHeader, exists := sl.readPhCache(hash) if exists { - combinedPendingHeader := types.CopyHeader(localPendingHeader.Header) + combinedPendingHeader := types.CopyHeader(localPendingHeader.Header()) for _, i := range indices { - combinedPendingHeader = sl.combinePendingHeader(pendingHeader.Header, combinedPendingHeader, i, false) + combinedPendingHeader = sl.combinePendingHeader(pendingHeader.Header(), combinedPendingHeader, i, false) } bestPh, exist := sl.readPhCache(sl.bestPhKey) if !exist { - sl.bestPhKey = localPendingHeader.Termini[c_terminusIndex] - sl.writePhCache(localPendingHeader.Termini[c_terminusIndex], types.PendingHeader{Header: combinedPendingHeader, Termini: localPendingHeader.Termini}) - bestPh = types.PendingHeader{Header: combinedPendingHeader, Termini: localPendingHeader.Termini} + sl.bestPhKey = localPendingHeader.Termini().DomTerminus() + sl.writePhCache(localPendingHeader.Termini().DomTerminus(), types.NewPendingHeader(combinedPendingHeader, localPendingHeader.Termini())) + bestPh = types.NewPendingHeader(combinedPendingHeader, localPendingHeader.Termini()) log.Error("BestPh Key does not exist for", "key", sl.bestPhKey) } - oldBestPhEntropy := sl.engine.TotalLogPhS(bestPh.Header) - sl.updatePhCache(types.PendingHeader{Header: combinedPendingHeader, Termini: localPendingHeader.Termini}, false, nil) - sl.pickPhHead(types.PendingHeader{Header: combinedPendingHeader, Termini: localPendingHeader.Termini}, oldBestPhEntropy) + oldBestPhEntropy := sl.engine.TotalLogPhS(bestPh.Header()) + sl.updatePhCache(types.NewPendingHeader(combinedPendingHeader, localPendingHeader.Termini()), false, nil) + sl.pickPhHead(types.NewPendingHeader(combinedPendingHeader, localPendingHeader.Termini()), oldBestPhEntropy) return nil } - log.Warn("no pending header found for", "terminus", hash, "pendingHeaderNumber", pendingHeader.Header.NumberArray(), "Hash", pendingHeader.Header.ParentHash(), "Termini index", terminiIndex, "indices", indices) + log.Warn("no pending header found for", "terminus", hash, "pendingHeaderNumber", pendingHeader.Header().NumberArray(), "Hash", pendingHeader.Header().ParentHash(), "Termini index", terminiIndex, "indices", indices) return errors.New("no pending header found in cache") } @@ -599,33 +619,33 
@@ func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS var exists bool if localHeader != nil { termini := sl.hc.GetTerminiByHash(localHeader.ParentHash()) - pendingHeaderWithTermini, exists = sl.readPhCache(termini[c_terminusIndex]) + pendingHeaderWithTermini, exists = sl.readPhCache(termini.DomTerminus()) if exists { - pendingHeaderWithTermini.Header = sl.combinePendingHeader(localHeader, pendingHeaderWithTermini.Header, common.ZONE_CTX, true) + pendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localHeader, pendingHeaderWithTermini.Header(), common.ZONE_CTX, true)) } } // Update the pendingHeader Cache - oldPh, exist := sl.readPhCache(pendingHeaderWithTermini.Termini[c_terminusIndex]) + oldPh, exist := sl.readPhCache(pendingHeaderWithTermini.Termini().DomTerminus()) var deepCopyPendingHeaderWithTermini types.PendingHeader - newPhEntropy := sl.engine.TotalLogPhS(pendingHeaderWithTermini.Header) - deepCopyPendingHeaderWithTermini = types.PendingHeader{Header: types.CopyHeader(pendingHeaderWithTermini.Header), Termini: pendingHeaderWithTermini.Termini} - deepCopyPendingHeaderWithTermini.Header.SetLocation(common.NodeLocation) - deepCopyPendingHeaderWithTermini.Header.SetTime(uint64(time.Now().Unix())) + newPhEntropy := sl.engine.TotalLogPhS(pendingHeaderWithTermini.Header()) + deepCopyPendingHeaderWithTermini = types.NewPendingHeader(types.CopyHeader(pendingHeaderWithTermini.Header()), pendingHeaderWithTermini.Termini()) + deepCopyPendingHeaderWithTermini.Header().SetLocation(common.NodeLocation) + deepCopyPendingHeaderWithTermini.Header().SetTime(uint64(time.Now().Unix())) if exist { // If we are inslice we will only update the cache if the entropy is better // Simultaneously we have to allow for the state root update // asynchronously, to do this equal check is added to the inSlice case - if (!inSlice && newPhEntropy.Cmp(sl.engine.TotalLogPhS(pendingHeaderWithTermini.Header)) >= 0) || - (inSlice && pendingHeaderWithTermini.Header.ParentEntropy().Cmp(oldPh.Header.ParentEntropy()) >= 0) { - sl.writePhCache(pendingHeaderWithTermini.Termini[c_terminusIndex], deepCopyPendingHeaderWithTermini) - log.Info("PhCache update:", "inSlice:", inSlice, "Ph Number:", deepCopyPendingHeaderWithTermini.Header.NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini[c_terminusIndex]) + if (!inSlice && newPhEntropy.Cmp(sl.engine.TotalLogPhS(pendingHeaderWithTermini.Header())) >= 0) || + (inSlice && pendingHeaderWithTermini.Header().ParentEntropy().Cmp(oldPh.Header().ParentEntropy()) >= 0) { + sl.writePhCache(pendingHeaderWithTermini.Termini().DomTerminus(), deepCopyPendingHeaderWithTermini) + log.Info("PhCache update:", "inSlice:", inSlice, "Ph Number:", deepCopyPendingHeaderWithTermini.Header().NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini().DomTerminus()) } } else { if inSlice { - sl.writePhCache(pendingHeaderWithTermini.Termini[c_terminusIndex], deepCopyPendingHeaderWithTermini) - log.Info("PhCache new terminus inSlice ", "Ph Number:", deepCopyPendingHeaderWithTermini.Header.NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini[c_terminusIndex]) + sl.writePhCache(pendingHeaderWithTermini.Termini().DomTerminus(), deepCopyPendingHeaderWithTermini) + log.Info("PhCache new terminus inSlice ", "Ph Number:", deepCopyPendingHeaderWithTermini.Header().NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini().DomTerminus()) } else { log.Info("phCache tried to create new entry from coord") } @@ -633,11 +653,11 @@ func (sl 
*Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS } func (sl *Slice) pickPhHead(pendingHeaderWithTermini types.PendingHeader, oldBestPhEntropy *big.Int) bool { - newPhEntropy := sl.engine.TotalLogPhS(pendingHeaderWithTermini.Header) + newPhEntropy := sl.engine.TotalLogPhS(pendingHeaderWithTermini.Header()) // Pick a phCache Head if sl.poem(newPhEntropy, oldBestPhEntropy) { - sl.bestPhKey = pendingHeaderWithTermini.Termini[c_terminusIndex] - log.Info("Choosing new pending header", "Ph Number:", pendingHeaderWithTermini.Header.NumberArray(), "terminus:", pendingHeaderWithTermini.Termini[c_terminusIndex]) + sl.bestPhKey = pendingHeaderWithTermini.Termini().DomTerminus() + log.Info("Choosing new pending header", "Ph Number:", pendingHeaderWithTermini.Header().NumberArray(), "terminus:", pendingHeaderWithTermini.Termini().DomTerminus()) return true } return false @@ -662,7 +682,15 @@ func (sl *Slice) init(genesis *Genesis) error { // If the headerchain is empty start from genesis if sl.hc.Empty() { // Initialize slice state for genesis knot - genesisTermini := []common.Hash{genesisHash, genesisHash, genesisHash, genesisHash} + genesisTermini := types.EmptyTermini() + genesisTermini.SetDomTerminus(genesisHash) + for i := 0; i < len(genesisTermini.SubTermini()); i++ { + genesisTermini.SetSubTerminiAtIndex(genesisHash, i) + } + for i := 0; i < len(genesisTermini.PrimeTermini()); i++ { + genesisTermini.SetPrimeTerminiAtIndex(genesisHash, i) + } + rawdb.WriteTermini(sl.sliceDb, genesisHash, genesisTermini) rawdb.WriteManifest(sl.sliceDb, genesisHash, types.BlockManifest{genesisHash}) @@ -795,6 +823,12 @@ func (sl *Slice) combinePendingHeader(header *types.Header, slPendingHeader *typ combinedPendingHeader.SetParentEntropy(header.ParentEntropy(index), index) combinedPendingHeader.SetParentDeltaS(header.ParentDeltaS(index), index) + if index == common.REGION_CTX { + for i := 0; i < common.NumZonesInRegion; i++ { + combinedPendingHeader.SetPrimeEntropyThreshold(header.PrimeEntropyThreshold(i), i) + } + } + if inSlice { combinedPendingHeader.SetEtxRollupHash(header.EtxRollupHash()) combinedPendingHeader.SetDifficulty(header.Difficulty()) @@ -822,14 +856,12 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header) { if err != nil { return } - if nodeCtx == common.PRIME_CTX { domPendingHeader = types.CopyHeader(localPendingHeader) } else { domPendingHeader = sl.combinePendingHeader(localPendingHeader, domPendingHeader, nodeCtx, true) domPendingHeader.SetLocation(common.NodeLocation) } - if nodeCtx != common.ZONE_CTX { for _, client := range sl.subClients { if client != nil { @@ -840,9 +872,16 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header) { } } } - genesisTermini := []common.Hash{genesisHash, genesisHash, genesisHash, genesisHash} + genesisTermini := types.EmptyTermini() + genesisTermini.SetDomTerminus(genesisHash) + for i := 0; i < len(genesisTermini.SubTermini()); i++ { + genesisTermini.SetSubTerminiAtIndex(genesisHash, i) + } + for i := 0; i < len(genesisTermini.PrimeTermini()); i++ { + genesisTermini.SetPrimeTerminiAtIndex(genesisHash, i) + } if sl.hc.Empty() { - sl.phCache.Add(sl.config.GenesisHash, types.PendingHeader{Header: domPendingHeader, Termini: genesisTermini}) + sl.phCache.Add(sl.config.GenesisHash, types.NewPendingHeader(domPendingHeader, genesisTermini)) } } @@ -888,7 +927,6 @@ func (sl *Slice) loadLastState() error { sl.phCache.Add(key, value) // Removing the PendingHeaders from the database 
rawdb.DeletePendingHeader(sl.sliceDb, key) - rawdb.DeletePhCacheTermini(sl.sliceDb, key) } rawdb.DeletePhCache(sl.sliceDb) sl.bestPhKey = rawdb.ReadBestPhKey(sl.sliceDb) @@ -1034,11 +1072,11 @@ func (sl *Slice) SetHeadBackToRecoveryState(pendingHeader *types.Header, hash co if nodeCtx == common.PRIME_CTX { localPendingHeaderWithTermini := sl.ComputeRecoveryPendingHeader(hash) sl.phCache.Add(hash, localPendingHeaderWithTermini) - sl.GenerateRecoveryPendingHeader(localPendingHeaderWithTermini.Header, localPendingHeaderWithTermini.Termini) + sl.GenerateRecoveryPendingHeader(localPendingHeaderWithTermini.Header(), localPendingHeaderWithTermini.Termini()) } else { localPendingHeaderWithTermini := sl.ComputeRecoveryPendingHeader(hash) - localPendingHeaderWithTermini.Header = sl.combinePendingHeader(localPendingHeaderWithTermini.Header, pendingHeader, nodeCtx, true) - localPendingHeaderWithTermini.Header.SetLocation(common.NodeLocation) + localPendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), pendingHeader, nodeCtx, true)) + localPendingHeaderWithTermini.Header().SetLocation(common.NodeLocation) sl.phCache.Add(hash, localPendingHeaderWithTermini) return localPendingHeaderWithTermini } @@ -1110,7 +1148,7 @@ func (sl *Slice) cleanCacheAndDatabaseTillBlock(hash common.Hash) { } } -func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkPointHashes []common.Hash) error { +func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkPointHashes types.Termini) error { nodeCtx := common.NodeLocation.Context() if nodeCtx == common.PRIME_CTX { for i := 0; i < common.NumRegionsInPrime; i++ { @@ -1119,14 +1157,14 @@ func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.Header, chec } } } else if nodeCtx == common.REGION_CTX { - newPendingHeader := sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes[common.NodeLocation.Region()]) + newPendingHeader := sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes.SubTerminiAtIndex(common.NodeLocation.Region())) for i := 0; i < common.NumZonesInRegion; i++ { if sl.subClients[i] != nil { - sl.subClients[i].GenerateRecoveryPendingHeader(context.Background(), newPendingHeader.Header, newPendingHeader.Termini) + sl.subClients[i].GenerateRecoveryPendingHeader(context.Background(), newPendingHeader.Header(), newPendingHeader.Termini()) } } } else { - sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes[common.NodeLocation.Zone()]) + sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes.SubTerminiAtIndex(common.NodeLocation.Zone())) } return nil } @@ -1143,7 +1181,7 @@ func (sl *Slice) ComputeRecoveryPendingHeader(hash common.Hash) types.PendingHea } termini := sl.hc.GetTerminiByHash(hash) sl.bestPhKey = hash - return types.PendingHeader{Header: pendingHeader, Termini: termini} + return types.NewPendingHeader(pendingHeader, *termini) } // AddToBadHashesList adds a given set of badHashes to the BadHashesList diff --git a/core/types/block.go b/core/types/block.go index 1fcc47883f..da7a2df4e0 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -80,27 +80,28 @@ func (n *BlockNonce) UnmarshalText(input []byte) error { // Header represents a block header in the Quai blockchain. 
type Header struct { - parentHash []common.Hash `json:"parentHash" gencodec:"required"` - uncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - coinbase common.Address `json:"miner" gencodec:"required"` - root common.Hash `json:"stateRoot" gencodec:"required"` - txHash common.Hash `json:"transactionsRoot" gencodec:"required"` - etxHash common.Hash `json:"extTransactionsRoot" gencodec:"required"` - etxRollupHash common.Hash `json:"extRollupRoot" gencodec:"required"` - manifestHash []common.Hash `json:"manifestHash" gencodec:"required"` - receiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - difficulty *big.Int `json:"difficulty" gencodec:"required"` - parentEntropy []*big.Int `json:"parentEntropy" gencodec:"required"` - parentDeltaS []*big.Int `json:"parentDeltaS" gencodec:"required"` - number []*big.Int `json:"number" gencodec:"required"` - gasLimit uint64 `json:"gasLimit" gencodec:"required"` - gasUsed uint64 `json:"gasUsed" gencodec:"required"` - baseFee *big.Int `json:"baseFeePerGas" gencodec:"required"` - location common.Location `json:"location" gencodec:"required"` - time uint64 `json:"timestamp" gencodec:"required"` - extra []byte `json:"extraData" gencodec:"required"` - mixHash common.Hash `json:"mixHash" gencodec:"required"` - nonce BlockNonce `json:"nonce"` + parentHash []common.Hash `json:"parentHash" gencodec:"required"` + uncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + coinbase common.Address `json:"miner" gencodec:"required"` + root common.Hash `json:"stateRoot" gencodec:"required"` + txHash common.Hash `json:"transactionsRoot" gencodec:"required"` + etxHash common.Hash `json:"extTransactionsRoot" gencodec:"required"` + etxRollupHash common.Hash `json:"extRollupRoot" gencodec:"required"` + manifestHash []common.Hash `json:"manifestHash" gencodec:"required"` + receiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + difficulty *big.Int `json:"difficulty" gencodec:"required"` + primeEntropyThreshold []*big.Int `json:"primeEntropyThreshold" gencodec:"required"` + parentEntropy []*big.Int `json:"parentEntropy" gencodec:"required"` + parentDeltaS []*big.Int `json:"parentDeltaS" gencodec:"required"` + number []*big.Int `json:"number" gencodec:"required"` + gasLimit uint64 `json:"gasLimit" gencodec:"required"` + gasUsed uint64 `json:"gasUsed" gencodec:"required"` + baseFee *big.Int `json:"baseFeePerGas" gencodec:"required"` + location common.Location `json:"location" gencodec:"required"` + time uint64 `json:"timestamp" gencodec:"required"` + extra []byte `json:"extraData" gencodec:"required"` + mixHash common.Hash `json:"mixHash" gencodec:"required"` + nonce BlockNonce `json:"nonce"` // caches hash atomic.Value @@ -111,41 +112,43 @@ type Header struct { // field type overrides for gencodec type headerMarshaling struct { - Difficulty *hexutil.Big - Number []*hexutil.Big - GasLimit hexutil.Uint64 - GasUsed hexutil.Uint64 - BaseFee *hexutil.Big - ParentEntropy []*hexutil.Big - ParentDeltaS []*hexutil.Big - Time hexutil.Uint64 - Extra hexutil.Bytes - Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON + Difficulty *hexutil.Big + PrimeEntropyThreshold []*hexutil.Big + Number []*hexutil.Big + GasLimit hexutil.Uint64 + GasUsed hexutil.Uint64 + BaseFee *hexutil.Big + ParentEntropy []*hexutil.Big + ParentDeltaS []*hexutil.Big + Time hexutil.Uint64 + Extra hexutil.Bytes + Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON } // "external" header encoding. used for eth protocol, etc. 
type extheader struct { - ParentHash []common.Hash - UncleHash common.Hash - Coinbase common.Address - Root common.Hash - TxHash common.Hash - EtxHash common.Hash - EtxRollupHash common.Hash - ManifestHash []common.Hash - ReceiptHash common.Hash - Difficulty *big.Int - ParentEntropy []*big.Int - ParentDeltaS []*big.Int - Number []*big.Int - GasLimit uint64 - GasUsed uint64 - BaseFee *big.Int - Location common.Location - Time uint64 - Extra []byte - MixHash common.Hash - Nonce BlockNonce + ParentHash []common.Hash + UncleHash common.Hash + Coinbase common.Address + Root common.Hash + TxHash common.Hash + EtxHash common.Hash + EtxRollupHash common.Hash + ManifestHash []common.Hash + ReceiptHash common.Hash + Difficulty *big.Int + PrimeEntropyThreshold []*big.Int + ParentEntropy []*big.Int + ParentDeltaS []*big.Int + Number []*big.Int + GasLimit uint64 + GasUsed uint64 + BaseFee *big.Int + Location common.Location + Time uint64 + Extra []byte + MixHash common.Hash + Nonce BlockNonce } // Construct an empty header @@ -157,6 +160,7 @@ func EmptyHeader() *Header { h.parentDeltaS = make([]*big.Int, common.HierarchyDepth) h.number = make([]*big.Int, common.HierarchyDepth) h.difficulty = big.NewInt(0) + h.primeEntropyThreshold = make([]*big.Int, common.NumZonesInRegion) h.root = EmptyRootHash h.mixHash = EmptyRootHash h.txHash = EmptyRootHash @@ -171,6 +175,9 @@ func EmptyHeader() *Header { h.parentDeltaS[i] = big.NewInt(0) h.number[i] = big.NewInt(0) } + for i := 0; i < common.NumZonesInRegion; i++ { + h.primeEntropyThreshold[i] = big.NewInt(0) + } return h } @@ -190,6 +197,7 @@ func (h *Header) DecodeRLP(s *rlp.Stream) error { h.manifestHash = eh.ManifestHash h.receiptHash = eh.ReceiptHash h.difficulty = eh.Difficulty + h.primeEntropyThreshold = eh.PrimeEntropyThreshold h.parentEntropy = eh.ParentEntropy h.parentDeltaS = eh.ParentDeltaS h.number = eh.Number @@ -208,27 +216,28 @@ func (h *Header) DecodeRLP(s *rlp.Stream) error { // EncodeRLP serializes h into the Quai RLP block format. 
 func (h *Header) EncodeRLP(w io.Writer) error {
 	return rlp.Encode(w, extheader{
-		ParentHash:    h.parentHash,
-		UncleHash:     h.uncleHash,
-		Coinbase:      h.coinbase,
-		Root:          h.root,
-		TxHash:        h.txHash,
-		EtxHash:       h.etxHash,
-		EtxRollupHash: h.etxRollupHash,
-		ManifestHash:  h.manifestHash,
-		ReceiptHash:   h.receiptHash,
-		Difficulty:    h.difficulty,
-		ParentEntropy: h.parentEntropy,
-		ParentDeltaS:  h.parentDeltaS,
-		Number:        h.number,
-		GasLimit:      h.gasLimit,
-		GasUsed:       h.gasUsed,
-		BaseFee:       h.baseFee,
-		Location:      h.location,
-		Time:          h.time,
-		Extra:         h.extra,
-		MixHash:       h.mixHash,
-		Nonce:         h.nonce,
+		ParentHash:            h.parentHash,
+		UncleHash:             h.uncleHash,
+		Coinbase:              h.coinbase,
+		Root:                  h.root,
+		TxHash:                h.txHash,
+		EtxHash:               h.etxHash,
+		EtxRollupHash:         h.etxRollupHash,
+		ManifestHash:          h.manifestHash,
+		ReceiptHash:           h.receiptHash,
+		Difficulty:            h.difficulty,
+		PrimeEntropyThreshold: h.primeEntropyThreshold,
+		ParentEntropy:         h.parentEntropy,
+		ParentDeltaS:          h.parentDeltaS,
+		Number:                h.number,
+		GasLimit:              h.gasLimit,
+		GasUsed:               h.gasUsed,
+		BaseFee:               h.baseFee,
+		Location:              h.location,
+		Time:                  h.time,
+		Extra:                 h.extra,
+		MixHash:               h.mixHash,
+		Nonce:                 h.nonce,
 	})
 }
 
@@ -259,12 +268,15 @@ func (h *Header) RPCMarshalHeader() map[string]interface{} {
 	number := make([]*hexutil.Big, common.HierarchyDepth)
 	parentEntropy := make([]*hexutil.Big, common.HierarchyDepth)
 	parentDeltaS := make([]*hexutil.Big, common.HierarchyDepth)
+	primeEntropyThreshold := make([]*hexutil.Big, common.HierarchyDepth)
 	for i := 0; i < common.HierarchyDepth; i++ {
 		number[i] = (*hexutil.Big)(h.Number(i))
 		parentEntropy[i] = (*hexutil.Big)(h.ParentEntropy(i))
 		parentDeltaS[i] = (*hexutil.Big)(h.ParentDeltaS(i))
+		primeEntropyThreshold[i] = (*hexutil.Big)(h.PrimeEntropyThreshold(i))
 	}
 	result["number"] = number
+	result["primeEntropyThreshold"] = primeEntropyThreshold
 	result["parentEntropy"] = parentEntropy
 	result["parentDeltaS"] = parentDeltaS
 
@@ -315,6 +327,16 @@ func (h *Header) ParentDeltaS(args ...int) *big.Int {
 	}
 	return h.parentDeltaS[nodeCtx]
 }
+func (h *Header) PrimeEntropyThreshold(args ...int) *big.Int {
+	nodeCtx := common.NodeLocation.Context()
+	if len(args) > 0 {
+		nodeCtx = args[0]
+		if nodeCtx < 0 { // GENESIS ESCAPE
+			nodeCtx = 0
+		}
+	}
+	return h.primeEntropyThreshold[nodeCtx]
+}
 func (h *Header) ManifestHash(args ...int) common.Hash {
 	nodeCtx := common.NodeLocation.Context()
 	if len(args) > 0 {
@@ -437,6 +459,14 @@ func (h *Header) SetDifficulty(val *big.Int) {
 	h.sealHash = atomic.Value{} // clear sealHash cache
 	h.difficulty = new(big.Int).Set(val)
 }
+func (h *Header) SetPrimeEntropyThreshold(val *big.Int, args ...int) {
+	h.hash = atomic.Value{}     // clear hash cache
+	h.sealHash = atomic.Value{} // clear sealHash cache
+	if len(args) == 0 {
+		panic("must provide an index when setting the prime entropy threshold")
+	}
+	h.primeEntropyThreshold[args[0]] = new(big.Int).Set(val)
+}
 func (h *Header) SetNumber(val *big.Int, args ...int) {
 	h.hash = atomic.Value{}     // clear hash cache
 	h.sealHash = atomic.Value{} // clear sealHash cache
@@ -489,9 +519,10 @@ func (h *Header) SetNonce(val BlockNonce) {
 }
 
 // Array accessors
-func (h *Header) ParentHashArray() []common.Hash   { return h.parentHash }
-func (h *Header) ManifestHashArray() []common.Hash { return h.manifestHash }
-func (h *Header) NumberArray() []*big.Int           { return h.number }
+func (h *Header) ParentHashArray() []common.Hash         { return h.parentHash }
+func (h *Header) ManifestHashArray() []common.Hash       { return h.manifestHash }
+func (h *Header) NumberArray() []*big.Int                 { return h.number }
+func (h *Header) PrimeEntropyThresholdArray() []*big.Int  { return h.primeEntropyThreshold }
 
 // headerData comprises all data fields of the header, excluding the nonce, so
 // that the nonce may be independently adjusted in the work algorithm.
@@ -780,6 +811,7 @@ func CopyHeader(h *Header) *Header {
 	cpy.parentEntropy = make([]*big.Int, common.HierarchyDepth)
 	cpy.parentDeltaS = make([]*big.Int, common.HierarchyDepth)
 	cpy.number = make([]*big.Int, common.HierarchyDepth)
+	cpy.primeEntropyThreshold = make([]*big.Int, common.NumZonesInRegion)
 	for i := 0; i < common.HierarchyDepth; i++ {
 		cpy.SetParentHash(h.ParentHash(i), i)
 		cpy.SetManifestHash(h.ManifestHash(i), i)
@@ -787,6 +819,9 @@ func CopyHeader(h *Header) *Header {
 		cpy.SetParentDeltaS(h.ParentDeltaS(i), i)
 		cpy.SetNumber(h.Number(i), i)
 	}
+	for i := 0; i < common.NumZonesInRegion; i++ {
+		cpy.SetPrimeEntropyThreshold(h.PrimeEntropyThreshold(i), i)
+	}
 	cpy.SetUncleHash(h.UncleHash())
 	cpy.SetCoinbase(h.Coinbase())
 	cpy.SetRoot(h.Root())
@@ -972,20 +1007,204 @@ type Blocks []*Block
 
 // PendingHeader stores the header and termini value associated with the header.
 type PendingHeader struct {
-	Header  *Header
-	Termini []common.Hash
+	header  *Header `json:"header"`
+	termini Termini `json:"termini"`
+}
+
+// accessor methods for pending header
+func (ph PendingHeader) Header() *Header {
+	return ph.header
+}
+func (ph PendingHeader) Termini() Termini {
+	return ph.termini
+}
+
+func (ph *PendingHeader) SetHeader(header *Header) {
+	ph.header = CopyHeader(header)
+}
+
+func (ph *PendingHeader) SetTermini(termini Termini) {
+	ph.termini = CopyTermini(termini)
+}
+
+func emptyPendingHeader() PendingHeader {
+	pendingHeader := PendingHeader{}
+	pendingHeader.SetTermini(EmptyTermini())
+	return pendingHeader
+}
+
+func NewPendingHeader(header *Header, termini Termini) PendingHeader {
+	emptyPh := emptyPendingHeader()
+	emptyPh.SetHeader(header)
+	emptyPh.SetTermini(termini)
+	return emptyPh
 }
 
 func CopyPendingHeader(ph *PendingHeader) *PendingHeader {
 	cpy := *ph
-	cpy.Header = CopyHeader(ph.Header)
+	cpy.SetHeader(CopyHeader(ph.Header()))
+	cpy.SetTermini(CopyTermini(ph.Termini()))
+	return &cpy
+}
-
-	cpy.Termini = make([]common.Hash, 4)
-	for i, termini := range ph.Termini {
-		cpy.Termini[i] = termini
+// "external" pending header encoding. used for rlp
+type extPendingHeader struct {
+	Header  *Header
+	Termini Termini
+}
+
+// DecodeRLP decodes the Quai RLP encoding into pending header format.
+func (p *PendingHeader) DecodeRLP(s *rlp.Stream) error {
+	var eb extPendingHeader
+	if err := s.Decode(&eb); err != nil {
+		return err
 	}
+	p.header, p.termini = eb.Header, eb.Termini
+	return nil
+}
-
-	return &cpy
+// EncodeRLP serializes p into the Quai RLP format.
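Usage note: a hedged sketch of the new PendingHeader constructor and accessors added above. The import path is assumed to follow the go-quai module layout (github.com/dominant-strategies/go-quai); adjust it to the local checkout. The key behavior it shows is that SetHeader stores a deep copy, so callers cannot mutate a pending header through the original pointer:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/dominant-strategies/go-quai/core/types" // assumed module path
    )

    func main() {
        h := types.EmptyHeader()
        h.SetPrimeEntropyThreshold(big.NewInt(500), 0)

        ph := types.NewPendingHeader(h, types.EmptyTermini())

        // SetHeader copied h, so later mutations of h do not leak into ph.
        h.SetPrimeEntropyThreshold(big.NewInt(999), 0)
        fmt.Println(ph.Header().PrimeEntropyThreshold(0)) // still 500
    }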
+func (p PendingHeader) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, extPendingHeader{
+		Header:  p.header,
+		Termini: p.termini,
+	})
+}
+
+// Termini stores the dom terminus (i.e. the previous dom block), the
+// subTermini (i.e. the dom blocks that have occurred in the subordinate chains),
+// and the primeTermini (i.e. the prime blocks that have occurred in the region
+// for each of the zones in the region). The prime termini are used to control
+// the liveliness of the primes in the slices via the PrimeEntropyThreshold.
+type Termini struct {
+	domTerminus  common.Hash   `json:"domTerminus"`
+	subTermini   []common.Hash `json:"subTermini"`
+	primeTermini []common.Hash `json:"primeTermini"`
+}
+
+func CopyTermini(termini Termini) Termini {
+	newTermini := EmptyTermini()
+	newTermini.SetDomTerminus(termini.domTerminus)
+	for i, t := range termini.subTermini {
+		newTermini.SetSubTerminiAtIndex(t, i)
+	}
+	for i, t := range termini.primeTermini {
+		newTermini.SetPrimeTerminiAtIndex(t, i)
+	}
+	return newTermini
+}
+
+func EmptyTermini() Termini {
+	termini := Termini{}
+	termini.subTermini = make([]common.Hash, common.HierarchyDepth)
+	termini.primeTermini = make([]common.Hash, common.NumZonesInRegion)
+	return termini
+}
+
+func (t Termini) DomTerminus() common.Hash {
+	return t.domTerminus
+}
+
+func (t Termini) SubTermini() []common.Hash {
+	return t.subTermini
+}
+
+func (t Termini) PrimeTermini() []common.Hash {
+	return t.primeTermini
+}
+
+func (t Termini) SubTerminiAtIndex(args ...int) common.Hash {
+	if len(args) == 0 {
+		panic("cannot access sub termini at index without the index")
+	}
+	return t.subTermini[args[0]]
+}
+
+func (t Termini) PrimeTerminiAtIndex(args ...int) common.Hash {
+	if len(args) == 0 {
+		panic("cannot access prime termini at index without the index")
+	}
+	return t.primeTermini[args[0]]
+}
+
+func (t *Termini) SetDomTerminus(domTerminus common.Hash) {
+	t.domTerminus = domTerminus
+}
+
+func (t *Termini) SetSubTermini(subTermini []common.Hash) {
+	t.subTermini = make([]common.Hash, len(subTermini))
+	for i := 0; i < len(subTermini); i++ {
+		t.subTermini[i] = subTermini[i]
+	}
+}
+
+func (t *Termini) SetPrimeTermini(primeTermini []common.Hash) {
+	t.primeTermini = make([]common.Hash, len(primeTermini))
+	for i := 0; i < len(primeTermini); i++ {
+		t.primeTermini[i] = primeTermini[i]
+	}
+}
+
+func (t *Termini) SetSubTerminiAtIndex(val common.Hash, args ...int) {
+	if len(args) == 0 {
+		panic("index cannot be empty for the sub termini")
+	}
+	t.subTermini[args[0]] = val
+}
+
+func (t *Termini) SetPrimeTerminiAtIndex(val common.Hash, args ...int) {
+	if len(args) == 0 {
+		panic("index cannot be empty for the prime termini")
+	}
+	t.primeTermini[args[0]] = val
+}
+
+func (t *Termini) IsValid() bool {
+	if t == nil {
+		return false
+	}
+	if len(t.subTermini) != common.HierarchyDepth {
+		return false
+	}
+	if len(t.primeTermini) != common.NumZonesInRegion {
+		return false
+	}
+	return true
+}
+
+// "external" termini encoding. used for rlp
+type extTermini struct {
+	DomTerminus  common.Hash
+	SubTermini   []common.Hash
+	PrimeTermini []common.Hash
+}
+
+func (t Termini) RPCMarshalTermini() map[string]interface{} {
+	result := map[string]interface{}{
+		"domTerminus":  t.DomTerminus(),
+		"subTermini":   t.SubTermini(),
+		"primeTermini": t.PrimeTermini(),
+	}
+	return result
+}
+
+// DecodeRLP decodes the Quai RLP encoding into the termini format.
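Usage note: a short, hedged sketch of how the new Termini type is intended to be populated and validated (same assumed module path as above; common.HierarchyDepth and common.NumZonesInRegion come from the go-quai common package, and the zero hashes are placeholders):

    package main

    import (
        "fmt"

        "github.com/dominant-strategies/go-quai/common"     // assumed module path
        "github.com/dominant-strategies/go-quai/core/types" // assumed module path
    )

    func main() {
        // EmptyTermini allocates HierarchyDepth sub termini and NumZonesInRegion prime termini.
        t := types.EmptyTermini()

        for i := 0; i < common.HierarchyDepth; i++ {
            t.SetSubTerminiAtIndex(common.Hash{}, i)
        }
        for i := 0; i < common.NumZonesInRegion; i++ {
            t.SetPrimeTerminiAtIndex(common.Hash{}, i)
        }
        t.SetDomTerminus(common.Hash{})

        fmt.Println(t.IsValid()) // true: both slices have the expected lengths
    }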
+func (t *Termini) DecodeRLP(s *rlp.Stream) error {
+	var et extTermini
+	if err := s.Decode(&et); err != nil {
+		return err
+	}
+	t.domTerminus, t.subTermini, t.primeTermini = et.DomTerminus, et.SubTermini, et.PrimeTermini
+	return nil
+}
+
+// EncodeRLP serializes t into the Quai RLP format.
+func (t Termini) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, extTermini{
+		DomTerminus:  t.domTerminus,
+		SubTermini:   t.subTermini,
+		PrimeTermini: t.primeTermini,
+	})
 }
 
 // BlockManifest is a list of block hashes, which implements DerivableList
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 5fcdd0860d..81a2f6cec8 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -16,32 +16,34 @@ var _ = (*headerMarshaling)(nil)
 
 // MarshalJSON marshals as JSON.
 func (h Header) MarshalJSON() ([]byte, error) {
 	var enc struct {
-		ParentHash    []common.Hash  `json:"parentHash" gencodec:"required"`
-		UncleHash     common.Hash    `json:"sha3Uncles" gencodec:"required"`
-		Coinbase      common.Address `json:"miner" gencodec:"required"`
-		Root          common.Hash    `json:"stateRoot" gencodec:"required"`
-		TxHash        common.Hash    `json:"transactionsRoot" gencodec:"required"`
-		EtxHash       common.Hash    `json:"extTransactionsRoot" gencodec:"required"`
-		EtxRollupHash common.Hash    `json:"extRollupRoot" gencodec:"required"`
-		ManifestHash  []common.Hash  `json:"manifestHash" gencodec:"required"`
-		ReceiptHash   common.Hash    `json:"receiptsRoot" gencodec:"required"`
-		Difficulty    *hexutil.Big   `json:"difficulty" gencodec:"required"`
-		ParentEntropy []*hexutil.Big `json:"parentEntropy" gencodec:"required"`
-		ParentDeltaS  []*hexutil.Big `json:"parentDeltaS" gencodec:"required"`
-		Number        []*hexutil.Big `json:"number" gencodec:"required"`
-		GasLimit      hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
-		GasUsed       hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
-		BaseFee       *hexutil.Big   `json:"baseFeePerGas" gencodec:"required"`
-		Location      hexutil.Bytes  `json:"location" gencodec:"required"`
-		Time          hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Extra         hexutil.Bytes  `json:"extraData" gencodec:"required"`
-		MixHash       common.Hash    `json:"mixHash" gencodec:"required"`
-		Nonce         BlockNonce     `json:"nonce"`
-		Hash          common.Hash    `json:"hash"`
+		ParentHash            []common.Hash  `json:"parentHash" gencodec:"required"`
+		UncleHash             common.Hash    `json:"sha3Uncles" gencodec:"required"`
+		Coinbase              common.Address `json:"miner" gencodec:"required"`
+		Root                  common.Hash    `json:"stateRoot" gencodec:"required"`
+		TxHash                common.Hash    `json:"transactionsRoot" gencodec:"required"`
+		EtxHash               common.Hash    `json:"extTransactionsRoot" gencodec:"required"`
+		EtxRollupHash         common.Hash    `json:"extRollupRoot" gencodec:"required"`
+		ManifestHash          []common.Hash  `json:"manifestHash" gencodec:"required"`
+		ReceiptHash           common.Hash    `json:"receiptsRoot" gencodec:"required"`
+		Difficulty            *hexutil.Big   `json:"difficulty" gencodec:"required"`
+		PrimeEntropyThreshold []*hexutil.Big `json:"primeEntropyThreshold" gencodec:"required"`
+		ParentEntropy         []*hexutil.Big `json:"parentEntropy" gencodec:"required"`
+		ParentDeltaS          []*hexutil.Big `json:"parentDeltaS" gencodec:"required"`
+		Number                []*hexutil.Big `json:"number" gencodec:"required"`
+		GasLimit              hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+		GasUsed               hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+		BaseFee               *hexutil.Big   `json:"baseFeePerGas" gencodec:"required"`
+		Location              hexutil.Bytes  `json:"location" gencodec:"required"`
+		Time                  hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+		Extra                 hexutil.Bytes  `json:"extraData" gencodec:"required"`
+		MixHash               common.Hash    `json:"mixHash" gencodec:"required"`
+		Nonce                 BlockNonce     `json:"nonce"`
+		Hash                  common.Hash    `json:"hash"`
 	}
 
 	// Initialize the enc struct
 	enc.ParentEntropy = make([]*hexutil.Big, common.HierarchyDepth)
 	enc.ParentDeltaS = make([]*hexutil.Big, common.HierarchyDepth)
+	enc.PrimeEntropyThreshold = make([]*hexutil.Big, common.NumZonesInRegion)
 	enc.Number = make([]*hexutil.Big, common.HierarchyDepth)
 
 	copy(enc.ParentHash, h.ParentHashArray())
@@ -51,6 +53,9 @@ func (h Header) MarshalJSON() ([]byte, error) {
 		enc.ParentDeltaS[i] = (*hexutil.Big)(h.ParentDeltaS(i))
 		enc.Number[i] = (*hexutil.Big)(h.Number(i))
 	}
+	for i := 0; i < common.NumZonesInRegion; i++ {
+		enc.PrimeEntropyThreshold[i] = (*hexutil.Big)(h.PrimeEntropyThreshold(i))
+	}
 	enc.UncleHash = h.UncleHash()
 	enc.Coinbase = h.Coinbase()
 	enc.Root = h.Root()
@@ -75,27 +80,28 @@ func (h Header) MarshalJSON() ([]byte, error) {
 
 // UnmarshalJSON unmarshals from JSON.
 func (h *Header) UnmarshalJSON(input []byte) error {
 	var dec struct {
-		ParentHash    []common.Hash   `json:"parentHash" gencodec:"required"`
-		UncleHash     *common.Hash    `json:"sha3Uncles" gencodec:"required"`
-		Coinbase      *common.Address `json:"miner" gencodec:"required"`
-		Root          *common.Hash    `json:"stateRoot" gencodec:"required"`
-		TxHash        *common.Hash    `json:"transactionsRoot" gencodec:"required"`
-		ReceiptHash   *common.Hash    `json:"receiptsRoot" gencodec:"required"`
-		EtxHash       *common.Hash    `json:"extTransactionsRoot" gencodec:"required"`
-		EtxRollupHash *common.Hash    `json:"extRollupRoot" gencodec:"required"`
-		ManifestHash  []common.Hash   `json:"manifestHash" gencodec:"required"`
-		Difficulty    *hexutil.Big    `json:"difficulty" gencodec:"required"`
-		ParentEntropy []*hexutil.Big  `json:"parentEntropy" gencodec:"required"`
-		ParentDeltaS  []*hexutil.Big  `json:"parentDeltaS" gencodec:"required"`
-		Number        []*hexutil.Big  `json:"number" gencodec:"required"`
-		GasLimit      *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
-		GasUsed       *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
-		BaseFee       *hexutil.Big    `json:"baseFeePerGas" gencodec:"required"`
-		Location      hexutil.Bytes   `json:"location" gencodec:"required"`
-		Time          hexutil.Uint64  `json:"timestamp" gencodec:"required"`
-		Extra         hexutil.Bytes   `json:"extraData" gencodec:"required"`
-		MixHash       *common.Hash    `json:"MixHash" gencodec:"required"`
-		Nonce         BlockNonce      `json:"nonce"`
+		ParentHash            []common.Hash   `json:"parentHash" gencodec:"required"`
+		UncleHash             *common.Hash    `json:"sha3Uncles" gencodec:"required"`
+		Coinbase              *common.Address `json:"miner" gencodec:"required"`
+		Root                  *common.Hash    `json:"stateRoot" gencodec:"required"`
+		TxHash                *common.Hash    `json:"transactionsRoot" gencodec:"required"`
+		ReceiptHash           *common.Hash    `json:"receiptsRoot" gencodec:"required"`
+		EtxHash               *common.Hash    `json:"extTransactionsRoot" gencodec:"required"`
+		EtxRollupHash         *common.Hash    `json:"extRollupRoot" gencodec:"required"`
+		ManifestHash          []common.Hash   `json:"manifestHash" gencodec:"required"`
+		Difficulty            *hexutil.Big    `json:"difficulty" gencodec:"required"`
+		PrimeEntropyThreshold []*hexutil.Big  `json:"primeEntropyThreshold" gencodec:"required"`
+		ParentEntropy         []*hexutil.Big  `json:"parentEntropy" gencodec:"required"`
+		ParentDeltaS          []*hexutil.Big  `json:"parentDeltaS" gencodec:"required"`
+		Number                []*hexutil.Big  `json:"number" gencodec:"required"`
+		GasLimit              *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+		GasUsed               *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+		BaseFee               *hexutil.Big    `json:"baseFeePerGas" gencodec:"required"`
+		Location              hexutil.Bytes   `json:"location" gencodec:"required"`
+		Time                  hexutil.Uint64  `json:"timestamp" gencodec:"required"`
+		Extra                 hexutil.Bytes   `json:"extraData" gencodec:"required"`
+		MixHash               *common.Hash    `json:"MixHash" gencodec:"required"`
+		Nonce                 BlockNonce      `json:"nonce"`
 	}
 	if err := json.Unmarshal(input, &dec); err != nil {
 		return err
@@ -130,6 +136,9 @@ func (h *Header) UnmarshalJSON(input []byte) error {
 	if dec.Difficulty == nil {
 		return errors.New("missing required field 'difficulty' for Header")
 	}
+	if dec.PrimeEntropyThreshold == nil {
+		return errors.New("missing required field 'primeEntropyThreshold' for Header")
+	}
 	if dec.ParentEntropy == nil {
 		return errors.New("missing required field 'parentEntropy' for Header")
 	}
@@ -160,6 +169,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
 	h.parentEntropy = make([]*big.Int, common.HierarchyDepth)
 	h.parentDeltaS = make([]*big.Int, common.HierarchyDepth)
 	h.number = make([]*big.Int, common.HierarchyDepth)
+	h.primeEntropyThreshold = make([]*big.Int, common.NumZonesInRegion)
 
 	for i := 0; i < common.HierarchyDepth; i++ {
 		h.SetParentHash(dec.ParentHash[i], i)
@@ -177,6 +187,9 @@ func (h *Header) UnmarshalJSON(input []byte) error {
 		}
 		h.SetNumber((*big.Int)(dec.Number[i]), i)
 	}
+	for i := 0; i < common.NumZonesInRegion; i++ {
+		h.SetPrimeEntropyThreshold((*big.Int)(dec.PrimeEntropyThreshold[i]), i)
+	}
 	h.SetUncleHash(*dec.UncleHash)
 	h.SetCoinbase(*dec.Coinbase)
 	h.SetRoot(*dec.Root)
@@ -198,3 +211,40 @@ func (h *Header) UnmarshalJSON(input []byte) error {
 	h.SetNonce(dec.Nonce)
 	return nil
 }
+
+func (t Termini) MarshalJSON() ([]byte, error) {
+	var enc struct {
+		DomTerminus  common.Hash   `json:"domTerminus" gencodec:"required"`
+		SubTermini   []common.Hash `json:"subTermini" gencodec:"required"`
+		PrimeTermini []common.Hash `json:"primeTermini" gencodec:"required"`
+	}
+	enc.SubTermini = t.SubTermini()
+	enc.PrimeTermini = t.PrimeTermini()
+	enc.DomTerminus = t.DomTerminus()
+	raw, err := json.Marshal(&enc)
+	return raw, err
+}
+
+func (t *Termini) UnmarshalJSON(input []byte) error {
+	var dec struct {
+		DomTerminus  *common.Hash  `json:"domTerminus" gencodec:"required"`
+		SubTermini   []common.Hash `json:"subTermini" gencodec:"required"`
+		PrimeTermini []common.Hash `json:"primeTermini" gencodec:"required"`
+	}
+	if err := json.Unmarshal(input, &dec); err != nil {
+		return err
+	}
+	if dec.DomTerminus == nil {
+		return errors.New("missing required field 'domTerminus' for Termini")
+	}
+	if dec.SubTermini == nil {
+		return errors.New("missing required field 'subTermini' for Termini")
+	}
+	if dec.PrimeTermini == nil {
+		return errors.New("missing required field 'primeTermini' for Termini")
+	}
+	t.SetDomTerminus(*dec.DomTerminus)
+	t.SetSubTermini(dec.SubTermini)
+	t.SetPrimeTermini(dec.PrimeTermini)
+	return nil
+}
diff --git a/core/worker.go b/core/worker.go
index 1fbdbc1106..47e8785cfd 100644
--- a/core/worker.go
+++ b/core/worker.go
@@ -764,9 +764,26 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en
 			header.SetParentDeltaS(w.engine.DeltaLogS(parent.Header()), nodeCtx)
 		}
 	}
+
+		if nodeCtx == common.REGION_CTX {
+			for i := 0; i < common.NumZonesInRegion; i++ {
+				header.SetPrimeEntropyThreshold(parent.Header().PrimeEntropyThreshold(i), i)
+			}
+		}
+
+		if nodeCtx == common.REGION_CTX && order == common.PRIME_CTX {
+			primeEntropyThreshold, err := w.engine.CalcPrimeEntropyThreshold(w.hc, parent.Header())
+			if err != nil {
+				return nil, err
+			}
+			header.SetPrimeEntropyThreshold(primeEntropyThreshold, parent.Header().Location().SubIndex())
+		}
 		header.SetParentEntropy(w.engine.TotalLogS(parent.Header()))
+	} else {
+		for i := 0; i < common.NumZonesInRegion; i++ {
+			header.SetPrimeEntropyThreshold(parent.Header().PrimeEntropyThreshold(i), i)
+		}
 	}
-	// Only zone should calculate state
 	if nodeCtx == common.ZONE_CTX {
 		header.SetExtra(w.extra)
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 62651aef32..6d0c64ca45 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -500,6 +500,6 @@ func (b *QuaiAPIBackend) SubscribePendingHeaderEvent(ch chan<- *types.Header) ev
 	return b.eth.core.SubscribePendingHeader(ch)
 }
 
-func (b *QuaiAPIBackend) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes []common.Hash) error {
+func (b *QuaiAPIBackend) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error {
 	return b.eth.core.GenerateRecoveryPendingHeader(pendingHeader, checkpointHashes)
 }
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 170fea53b3..aa03bd81f4 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -153,7 +153,7 @@ type Core interface {
 	WriteBlock(block *types.Block)
 
 	// GetTerminiByHash returns the termini of a given block
-	GetTerminiByHash(hash common.Hash) []common.Hash
+	GetTerminiByHash(hash common.Hash) *types.Termini
 
 	// BadHashExistsInChain returns true if any of the specified bad hashes exists on chain
 	BadHashExistsInChain() bool
diff --git a/internal/quaiapi/backend.go b/internal/quaiapi/backend.go
index 914aa57661..8883c5db0e 100644
--- a/internal/quaiapi/backend.go
+++ b/internal/quaiapi/backend.go
@@ -84,7 +84,7 @@ type Backend interface {
 	AddPendingEtxs(pEtxs types.PendingEtxs) error
 	AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollup) error
 	PendingBlockAndReceipts() (*types.Block, types.Receipts)
-	GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes []common.Hash) error
+	GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error
 
 	// Transaction pool API
 	SendTx(ctx context.Context, signedTx *types.Transaction) error
diff --git a/internal/quaiapi/quai_api.go b/internal/quaiapi/quai_api.go
index 64c3179ace..ff77454829 100644
--- a/internal/quaiapi/quai_api.go
+++ b/internal/quaiapi/quai_api.go
@@ -628,8 +628,8 @@ func (s *PublicBlockChainQuaiAPI) Append(ctx context.Context, raw json.RawMessag
 }
 
 type SubRelay struct {
-	Header   *types.Header
-	Termini  []common.Hash
+	Header   *types.Header `json:"header"`
+	Termini  types.Termini `json:"termini"`
 	Location common.Location
 }
 
@@ -638,7 +638,7 @@ func (s *PublicBlockChainQuaiAPI) SubRelayPendingHeader(ctx context.Context, raw
 	if err := json.Unmarshal(raw, &subRelay); err != nil {
 		return
 	}
-	pendingHeader := types.PendingHeader{Header: subRelay.Header, Termini: subRelay.Termini}
+	pendingHeader := types.NewPendingHeader(subRelay.Header, subRelay.Termini)
 	s.b.SubRelayPendingHeader(pendingHeader, subRelay.Location)
 }
 
@@ -706,7 +706,7 @@ func (s *PublicBlockChainQuaiAPI) SendPendingEtxsRollupToDom(ctx context.Context
 
 type GenerateRecoveryPendingHeaderArgs struct {
 	PendingHeader    *types.Header `json:"pendingHeader"`
-	CheckpointHashes []common.Hash `json:"checkpointHashes"`
+	CheckpointHashes types.Termini `json:"checkpointHashes"`
 }
 
 func (s *PublicBlockChainQuaiAPI) GenerateRecoveryPendingHeader(ctx context.Context, raw json.RawMessage) error {
diff --git a/quaiclient/quaiclient.go b/quaiclient/quaiclient.go
index 874bfbd7f3..de0c438491 100644
--- a/quaiclient/quaiclient.go
+++ b/quaiclient/quaiclient.go
@@ -112,8 +112,8 @@ func (ec *Client) Append(ctx context.Context, header *types.Header, domPendingHe
 }
 
 func (ec *Client) SubRelayPendingHeader(ctx context.Context, pendingHeader types.PendingHeader, location common.Location) {
-	data := map[string]interface{}{"Header": pendingHeader.Header.RPCMarshalHeader()}
-	data["Termini"] = pendingHeader.Termini
+	data := map[string]interface{}{"header": pendingHeader.Header().RPCMarshalHeader()}
+	data["termini"] = pendingHeader.Termini().RPCMarshalTermini()
 	data["Location"] = location
 
 	ec.c.CallContext(ctx, nil, "quai_subRelayPendingHeader", data)
@@ -157,10 +157,10 @@ func (ec *Client) SendPendingEtxsRollupToDom(ctx context.Context, pEtxsRollup ty
 	return ec.c.CallContext(ctx, &raw, "quai_sendPendingEtxsRollupToDom", fields)
 }
 
-func (ec *Client) GenerateRecoveryPendingHeader(ctx context.Context, pendingHeader *types.Header, checkpointHashes []common.Hash) error {
+func (ec *Client) GenerateRecoveryPendingHeader(ctx context.Context, pendingHeader *types.Header, checkpointHashes types.Termini) error {
 	fields := make(map[string]interface{})
 	fields["pendingHeader"] = pendingHeader.RPCMarshalHeader()
-	fields["checkpointHashes"] = checkpointHashes
+	fields["checkpointHashes"] = checkpointHashes.RPCMarshalTermini()
 
 	return ec.c.CallContext(ctx, nil, "quai_generateRecoveryPendingHeader", fields)
 }
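For reference, a hedged sketch of the JSON payload shape the updated client now sends for quai_subRelayPendingHeader: the header and termini travel under lowercase keys, and the termini hashes are expanded by RPCMarshalTermini. Import paths are assumed to follow the go-quai module layout, and the zone-0-in-region-0 location is an illustrative placeholder:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/dominant-strategies/go-quai/common"     // assumed module path
        "github.com/dominant-strategies/go-quai/core/types" // assumed module path
    )

    func main() {
        ph := types.NewPendingHeader(types.EmptyHeader(), types.EmptyTermini())

        // Mirrors the body of Client.SubRelayPendingHeader after this change.
        data := map[string]interface{}{
            "header":   ph.Header().RPCMarshalHeader(),
            "termini":  ph.Termini().RPCMarshalTermini(),
            "Location": common.Location{0, 0}, // placeholder location
        }
        raw, _ := json.MarshalIndent(data, "", "  ")
        // Keys: "header", "termini" (domTerminus/subTermini/primeTermini), "Location".
        fmt.Println(string(raw))
    }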