Skip to content

Commit

Permalink
chore: Use weights from reclamped stakes in current address book (#17777)
Browse files Browse the repository at this point in the history

Signed-off-by: Michael Tinker <[email protected]>
  • Loading branch information
tinker-michaelj authored Feb 8, 2025
1 parent 77f369e commit e6c1433
Show file tree
Hide file tree
Showing 13 changed files with 161 additions and 38 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf;
import static com.hedera.node.app.records.impl.BlockRecordInfoUtils.blockHashByBlockNumber;
import static com.hedera.node.app.records.schemas.V0490BlockRecordSchema.BLOCK_INFO_STATE_KEY;
import static com.hedera.node.app.service.token.impl.handlers.staking.EndOfStakingPeriodUpdater.computeReclampedStakeWeights;
import static com.hedera.node.app.spi.workflows.record.StreamBuilder.nodeTransactionWith;
import static com.hedera.node.app.state.merkle.VersionUtils.isSoOrdered;
import static com.hedera.node.app.statedumpers.DumpCheckpoint.MOD_POST_EVENT_STREAM_REPLAY;
Expand Down Expand Up @@ -84,6 +85,7 @@
import com.hedera.node.app.service.networkadmin.impl.NetworkServiceImpl;
import com.hedera.node.app.service.schedule.impl.ScheduleServiceImpl;
import com.hedera.node.app.service.token.impl.ReadableNetworkStakingRewardsStoreImpl;
import com.hedera.node.app.service.token.impl.ReadableStakingInfoStoreImpl;
import com.hedera.node.app.service.token.impl.TokenServiceImpl;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
import com.hedera.node.app.service.token.impl.handlers.staking.StakePeriodManager;
Expand Down Expand Up @@ -116,6 +118,7 @@
import com.hedera.node.config.data.HederaConfig;
import com.hedera.node.config.data.LedgerConfig;
import com.hedera.node.config.data.NetworkAdminConfig;
import com.hedera.node.config.data.StakingConfig;
import com.hedera.node.config.data.VersionConfig;
import com.hedera.node.config.types.StreamMode;
import com.hedera.node.internal.network.Network;
Expand Down Expand Up @@ -166,6 +169,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
Expand Down Expand Up @@ -673,6 +677,12 @@ private void migrateSchemas(
if (diskAddressBook != null) {
PLATFORM_STATE_SERVICE.setDiskAddressBook(diskAddressBook);
}
final Supplier<Map<Long, Long>> reclampedStakeWeightsSupplier = () -> {
final var store = new ReadableStakingInfoStoreImpl(state.getReadableStates(TokenServiceImpl.NAME));
return computeReclampedStakeWeights(
store, configProvider.getConfiguration().getConfigData(StakingConfig.class));
};
PLATFORM_STATE_SERVICE.setReclampedStakeWeights(reclampedStakeWeightsSupplier);
this.initState = state;
final var migrationChanges = serviceMigrator.doMigrations(
state,
Expand All @@ -688,6 +698,7 @@ private void migrateSchemas(
startupNetworks);
this.initState = null;
PLATFORM_STATE_SERVICE.clearDiskAddressBook();
PLATFORM_STATE_SERVICE.clearReclampedStakeWeights();
migrationStateChanges = new ArrayList<>(migrationChanges);
kvStateChangeListener.reset();
boundaryStateChangeListener.reset();
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2023-2024 Hedera Hashgraph, LLC
* Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -42,4 +42,6 @@ public record StakingConfig(
@ConfigProperty(defaultValue = "25000000000000000") @NetworkProperty long startThreshold,
@ConfigProperty(defaultValue = "500") @NetworkProperty int sumOfConsensusWeights,
@ConfigProperty(defaultValue = "8500000000000000") @NetworkProperty long rewardBalanceThreshold,
@ConfigProperty(defaultValue = "650000000000000000") @NetworkProperty long maxStakeRewarded) {}
@ConfigProperty(defaultValue = "650000000000000000") @NetworkProperty long maxStakeRewarded,
@ConfigProperty(defaultValue = "0") @NetworkProperty long minStake,
@ConfigProperty(defaultValue = "45000000000000000") @NetworkProperty long maxStake) {}
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2022-2024 Hedera Hashgraph, LLC
* Copyright (C) 2022-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -35,6 +35,7 @@
import com.hedera.hapi.node.transaction.NodeStake;
import com.hedera.node.app.service.token.ReadableAccountStore;
import com.hedera.node.app.service.token.ReadableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.ReadableStakingInfoStore;
import com.hedera.node.app.service.token.impl.WritableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
import com.hedera.node.app.service.token.records.NodeStakeUpdateStreamBuilder;
Expand Down Expand Up @@ -161,7 +162,7 @@ public EndOfStakingPeriodUpdater(
final var pendingRewards = (nodeInfo.stakeRewardStart() - nodeInfo.unclaimedStakeRewardStart())
/ HBARS_TO_TINYBARS
* nodeRewardRate;
final var newStakes = computeNewStakes(nodeInfo);
final var newStakes = computeNewStakes(nodeInfo, stakingConfig);
log.info(
"For node{}, the tb/hbar reward rate was {} for {} pending, with stake reward start {} -> {}",
nodeId,
Expand Down Expand Up @@ -251,6 +252,36 @@ public EndOfStakingPeriodUpdater(
.status(SUCCESS);
}

/**
 * Given a {@link ReadableStakingInfoStore}, computes the new weights for all nodes based on their stakes,
 * re-clamped to the {@code [minStake, maxStake]} range in the given {@link StakingConfig}.
 * <p>
 * Deleted nodes are excluded; every non-deleted node gets an entry in the returned map.
 *
 * @param store the {@link ReadableStakingInfoStore} to compute the new weights for
 * @param stakingConfig the {@link StakingConfig} to use for the re-clamping
 * @return a map of node IDs to their new weights
 * @throws NullPointerException if either argument is null
 */
public static Map<Long, Long> computeReclampedStakeWeights(
        @NonNull final ReadableStakingInfoStore store, @NonNull final StakingConfig stakingConfig) {
    requireNonNull(store);
    requireNonNull(stakingConfig);
    // Single pass over the store: compute each non-deleted node's re-clamped stake once and
    // cache it, instead of re-reading the store and re-clamping in a second pass
    final Map<Long, Long> reclampedStakes = new HashMap<>();
    long totalStake = 0;
    for (final long nodeId : store.getAll()) {
        final var nodeInfo = requireNonNull(store.get(nodeId));
        if (!nodeInfo.deleted()) {
            final long stake = computeNewStakes(nodeInfo, stakingConfig).stake();
            reclampedStakes.put(nodeId, stake);
            totalStake += stake;
        }
    }
    // Scale each cached stake into the consensus weight range [0, sumOfConsensusWeights]
    final var totalWeight = stakingConfig.sumOfConsensusWeights();
    final Map<Long, Long> weights = new HashMap<>();
    for (final var entry : reclampedStakes.entrySet()) {
        weights.put(entry.getKey(), scaleStakeToWeight(entry.getValue(), totalStake, totalWeight));
    }
    return weights;
}

/**
* Scales up the weight of the node to the range [minStake, maxStakeOfAllNodes]
* from the consensus weight range [0, sumOfConsensusWeights].
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2023-2024 Hedera Hashgraph, LLC
* Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -255,15 +255,21 @@ public record StakeResult(long stake, long stakeRewardStart) {}
* current staking info. The new {@code stakeRewardStart} value is also computed
*
* @param stakingInfo the node's current staking info
* @param stakingConfig the staking configuration of the network
* @return the calculated {@link StakeResult}
*/
@NonNull
public static StakeResult computeNewStakes(@NonNull final StakingNodeInfo stakingInfo) {
public static StakeResult computeNewStakes(
@NonNull final StakingNodeInfo stakingInfo, @NonNull final StakingConfig stakingConfig) {
requireNonNull(stakingInfo);
requireNonNull(stakingConfig);
final var totalStake = stakingInfo.stakeToReward() + stakingInfo.stakeToNotReward();
final long newStake;
if (totalStake > stakingInfo.maxStake()) {
newStake = stakingInfo.maxStake();
} else if (totalStake < stakingInfo.minStake()) {
final long effectiveMax = Math.min(stakingInfo.maxStake(), stakingConfig.maxStake());
final long effectiveMin = Math.min(effectiveMax, Math.max(stakingInfo.minStake(), stakingConfig.minStake()));
if (totalStake > effectiveMax) {
newStake = effectiveMax;
} else if (totalStake < effectiveMin) {
newStake = 0;
} else {
newStake = totalStake;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
import com.hedera.node.app.service.token.impl.WritableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
import com.hedera.node.app.spi.workflows.record.StreamBuilder;
import com.hedera.node.config.data.LedgerConfig;
import com.hedera.node.config.data.StakingConfig;
import com.swirlds.config.api.Configuration;
import com.swirlds.state.lifecycle.info.NetworkInfo;
Expand Down Expand Up @@ -219,11 +218,9 @@ private void completeUpdateFromNewAddressBook(
@NonNull final WritableStakingInfoStore store,
@NonNull final List<NodeInfo> nodeInfos,
@NonNull final Configuration config) {
final var numberOfNodesInAddressBook = nodeInfos.size();
final long maxStakePerNode =
config.getConfigData(LedgerConfig.class).totalTinyBarFloat() / numberOfNodesInAddressBook;
final var numRewardHistoryStoredPeriods =
config.getConfigData(StakingConfig.class).rewardHistoryNumStoredPeriods();
final var stakingConfig = config.getConfigData(StakingConfig.class);
final var numRewardHistoryStoredPeriods = stakingConfig.rewardHistoryNumStoredPeriods();
final long maxStakePerNode = stakingConfig.maxStake();
for (final var nodeId : nodeInfos) {
final var stakingInfo = store.get(nodeId.nodeId());
if (stakingInfo != null) {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2023-2024 Hedera Hashgraph, LLC
* Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -17,8 +17,10 @@
package com.hedera.node.app.service.token.impl.test.handlers.staking;

import static com.hedera.node.app.service.token.impl.handlers.staking.EndOfStakingPeriodUtils.*;
import static com.hedera.node.app.service.token.impl.test.handlers.staking.StakeInfoHelperTest.DEFAULT_CONFIG;

import com.hedera.hapi.node.state.token.StakingNodeInfo;
import com.hedera.node.config.data.StakingConfig;
import java.math.BigInteger;
import java.util.Collections;
import java.util.List;
Expand All @@ -43,6 +45,7 @@ class EndOfStakingPeriodUtilsTest {
.rewardSumHistory(List.of(2L, 1L, 0L))
.weight(5)
.build();
private static final StakingConfig STAKING_CONFIG = DEFAULT_CONFIG.getConfigData(StakingConfig.class);

@Test
void readableNonZeroHistoryFromEmptyRewards() {
Expand Down Expand Up @@ -149,15 +152,16 @@ void calculatesUpdatedSumHistoryAsExpectedForNodeWithLessThanMinStakeWhenMinIsNo
@SuppressWarnings("DataFlowIssue")
@Test
void computeStakeNullArg() {
    // computeNewStakes() null-checks its staking-info argument, so passing null must raise NPE
    Assertions.assertThatThrownBy(() -> computeNewStakes(null, STAKING_CONFIG))
            .isInstanceOf(NullPointerException.class);
}

@Test
void computeStakeTotalStakeGreaterThanMaxStake() {
    // Cap the node's maxStake just below its total stake so clamping must kick in
    final var cappedMaxStake = STAKE_TO_REWARD + STAKE_TO_NOT_REWARD - 1;
    final var stakingInfo = STAKING_INFO.copyBuilder().maxStake(cappedMaxStake).build();

    final var newStakes = computeNewStakes(stakingInfo, STAKING_CONFIG);

    // Stake is clamped to the cap; stakeRewardStart still reflects the stake-to-reward portion
    Assertions.assertThat(newStakes.stake()).isEqualTo(cappedMaxStake);
    Assertions.assertThat(newStakes.stakeRewardStart()).isEqualTo(STAKE_TO_REWARD);
}
Expand All @@ -169,7 +173,7 @@ void computeStakeTotalStakeLessThanMinStake() {
.minStake(STAKE_TO_REWARD + STAKE_TO_NOT_REWARD + 1)
.build();

final var result = computeNewStakes(input);
final var result = computeNewStakes(input, STAKING_CONFIG);
Assertions.assertThat(result.stake()).isZero();
Assertions.assertThat(result.stakeRewardStart()).isEqualTo(STAKE_TO_REWARD);
}
Expand All @@ -182,7 +186,7 @@ void computeStakeTotalStakeInBetweenMinStakeAndMaxStake() {
.maxStake(STAKE_TO_REWARD + STAKE_TO_NOT_REWARD + 1)
.build();

final var result = computeNewStakes(input);
final var result = computeNewStakes(input, STAKING_CONFIG);
Assertions.assertThat(result.stake()).isEqualTo(STAKE_TO_REWARD + STAKE_TO_NOT_REWARD);
Assertions.assertThat(result.stakeRewardStart()).isEqualTo(STAKE_TO_REWARD);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,12 +123,12 @@ void marksNonExistingNodesToDeletedInStateAndAddsNewNodesToState() throws ParseE
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_4)).deleted()).isFalse();
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_4)).weight()).isZero();
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_4)).minStake()).isZero();
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_4)).maxStake()).isEqualTo(1666666666666666666L);
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_4)).maxStake()).isEqualTo(45000000000000000L);
// Also adds node 8 to the state
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_8)).deleted()).isFalse();
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_8)).weight()).isZero();
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_8)).minStake()).isZero();
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_8)).maxStake()).isEqualTo(1666666666666666666L);
assertThat(((StakingNodeInfo) updatedStates.get(NODE_NUM_8)).maxStake()).isEqualTo(45000000000000000L);
}

private MapWritableStates newStatesInstance(final MapWritableKVState<EntityNumber, StakingNodeInfo> stakingInfo) {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2020-2024 Hedera Hashgraph, LLC
* Copyright (C) 2020-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -40,6 +40,11 @@ default String getServiceName() {
return NAME;
}

@Override
default int migrationOrder() {
    // NOTE(review): a negative order presumably schedules this service's schema migrations
    // before services using the default order — confirm against the migrator's ordering contract
    return -1;
}

@NonNull
@Override
default Set<RpcServiceDefinition> rpcDefinitions() {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2024 Hedera Hashgraph, LLC
* Copyright (C) 2024-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -37,8 +37,10 @@
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doingContextual;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyNamed;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overriding;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overridingTwo;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcingContextual;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.waitUntilStartOfNextStakingPeriod;
import static com.hedera.services.bdd.suites.HapiSuite.ONE_BILLION_HBARS;
import static com.hedera.services.bdd.suites.hip869.NodeCreateTest.generateX509Certificates;
import static com.hedera.services.bdd.suites.utils.validation.ValidationScenarios.TINYBARS_PER_HBAR;
import static com.swirlds.platform.roster.RosterRetriever.getActiveRosterHash;
Expand Down Expand Up @@ -312,12 +314,11 @@ private void observeInteractionsIn(@NonNull final Block block) {
private Stream<SpecOperation> enableTss() {
if (blockSigningType == BlockSigningType.SIGN_WITH_LEDGER_ID) {
return Stream.of(
overriding("tss.keyCandidateRoster", "true"), overriding("tss.signWithLedgerId", "true")
// overridingTwo("tss.signWithLedgerId", "true",
// "tss.maxSharesPerNode", "4")
);
overridingTwo("tss.keyCandidateRoster", "true", "staking.maxStake", "" + (50 * ONE_BILLION_HBARS)),
overriding("tss.signWithLedgerId", "true"));
} else {
return Stream.of(overriding("tss.keyCandidateRoster", "true"));
return Stream.of(
overridingTwo("tss.keyCandidateRoster", "true", "staking.maxStake", "" + (50 * ONE_BILLION_HBARS)));
}
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2024 Hedera Hashgraph, LLC
* Copyright (C) 2024-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -102,7 +102,7 @@ Stream<DynamicTest> blockStreamManagerCatchesUpWithIndirectProofs() {
*/
@LeakyRepeatableHapiTest(
value = {NEEDS_TSS_CONTROL, NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION},
overrides = {"tss.keyCandidateRoster"})
overrides = {"tss.keyCandidateRoster", "staking.maxStake"})
Stream<DynamicTest> embeddedNodeVotesGivenThresholdValidMessages() {
final var scenario = rekeyingScenario(
// Changing stakes is enough to ensure the candidate roster is different from the active roster
Expand All @@ -121,7 +121,7 @@ Stream<DynamicTest> embeddedNodeVotesGivenThresholdValidMessages() {
*/
@LeakyRepeatableHapiTest(
value = {NEEDS_TSS_CONTROL, NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION},
overrides = {"tss.keyCandidateRoster", "tss.signWithLedgerId"})
overrides = {"tss.keyCandidateRoster", "tss.signWithLedgerId", "staking.maxStake"})
Stream<DynamicTest> blockSigningHappyPath() {
final var scenario = rekeyingScenario(
// Changing stakes is enough to ensure the candidate roster is different from the active roster
Expand Down
Loading

0 comments on commit e6c1433

Please sign in to comment.