Skip to content

Commit

Permalink
Use weights from reclamped stakes in current address book
Browse files Browse the repository at this point in the history
Signed-off-by: Michael Tinker <[email protected]>
  • Loading branch information
tinker-michaelj committed Feb 7, 2025
1 parent 77f369e commit 9dc9464
Show file tree
Hide file tree
Showing 9 changed files with 135 additions and 23 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import static com.hedera.node.app.hapi.utils.CommonUtils.noThrowSha384HashOf;
import static com.hedera.node.app.records.impl.BlockRecordInfoUtils.blockHashByBlockNumber;
import static com.hedera.node.app.records.schemas.V0490BlockRecordSchema.BLOCK_INFO_STATE_KEY;
import static com.hedera.node.app.service.token.impl.handlers.staking.EndOfStakingPeriodUpdater.computeReclampedStakeWeights;
import static com.hedera.node.app.spi.workflows.record.StreamBuilder.nodeTransactionWith;
import static com.hedera.node.app.state.merkle.VersionUtils.isSoOrdered;
import static com.hedera.node.app.statedumpers.DumpCheckpoint.MOD_POST_EVENT_STREAM_REPLAY;
Expand Down Expand Up @@ -84,6 +85,7 @@
import com.hedera.node.app.service.networkadmin.impl.NetworkServiceImpl;
import com.hedera.node.app.service.schedule.impl.ScheduleServiceImpl;
import com.hedera.node.app.service.token.impl.ReadableNetworkStakingRewardsStoreImpl;
import com.hedera.node.app.service.token.impl.ReadableStakingInfoStoreImpl;
import com.hedera.node.app.service.token.impl.TokenServiceImpl;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
import com.hedera.node.app.service.token.impl.handlers.staking.StakePeriodManager;
Expand Down Expand Up @@ -116,6 +118,7 @@
import com.hedera.node.config.data.HederaConfig;
import com.hedera.node.config.data.LedgerConfig;
import com.hedera.node.config.data.NetworkAdminConfig;
import com.hedera.node.config.data.StakingConfig;
import com.hedera.node.config.data.VersionConfig;
import com.hedera.node.config.types.StreamMode;
import com.hedera.node.internal.network.Network;
Expand Down Expand Up @@ -673,6 +676,10 @@ private void migrateSchemas(
if (diskAddressBook != null) {
PLATFORM_STATE_SERVICE.setDiskAddressBook(diskAddressBook);
}
final var store = new ReadableStakingInfoStoreImpl(state.getReadableStates(TokenServiceImpl.NAME));
final var reclampedStakeWeights = computeReclampedStakeWeights(
store, configProvider.getConfiguration().getConfigData(StakingConfig.class));
PLATFORM_STATE_SERVICE.setReclampedStakeWeights(reclampedStakeWeights);
this.initState = state;
final var migrationChanges = serviceMigrator.doMigrations(
state,
Expand All @@ -688,6 +695,7 @@ private void migrateSchemas(
startupNetworks);
this.initState = null;
PLATFORM_STATE_SERVICE.clearDiskAddressBook();
PLATFORM_STATE_SERVICE.clearReclampedStakeWeights();
migrationStateChanges = new ArrayList<>(migrationChanges);
kvStateChangeListener.reset();
boundaryStateChangeListener.reset();
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,19 @@
// SPDX-License-Identifier: Apache-2.0
/*
* Copyright (C) 2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.hedera.node.app;

import static com.swirlds.common.io.utility.FileUtils.getAbsolutePath;
Expand Down Expand Up @@ -297,6 +312,8 @@ public static void main(final String... args) throws Exception {
initialState.get(),
diskAddressBook.copy(),
platformContext);
// 0.58-specific weight update based on stakes "re-clamped" to the new
// configuration-managed [minStake, maxStake] range
rosterHistory = buildRosterHistory((State) initialState.get().getState());
}
final var platformBuilder = PlatformBuilder.create(
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2023-2024 Hedera Hashgraph, LLC
* Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -42,4 +42,6 @@ public record StakingConfig(
@ConfigProperty(defaultValue = "25000000000000000") @NetworkProperty long startThreshold,
@ConfigProperty(defaultValue = "500") @NetworkProperty int sumOfConsensusWeights,
@ConfigProperty(defaultValue = "8500000000000000") @NetworkProperty long rewardBalanceThreshold,
@ConfigProperty(defaultValue = "650000000000000000") @NetworkProperty long maxStakeRewarded) {}
@ConfigProperty(defaultValue = "650000000000000000") @NetworkProperty long maxStakeRewarded,
@ConfigProperty(defaultValue = "0") @NetworkProperty long minStake,
@ConfigProperty(defaultValue = "100000000000000000") @NetworkProperty long maxStake) {}
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2022-2024 Hedera Hashgraph, LLC
* Copyright (C) 2022-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -35,6 +35,7 @@
import com.hedera.hapi.node.transaction.NodeStake;
import com.hedera.node.app.service.token.ReadableAccountStore;
import com.hedera.node.app.service.token.ReadableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.ReadableStakingInfoStore;
import com.hedera.node.app.service.token.impl.WritableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
import com.hedera.node.app.service.token.records.NodeStakeUpdateStreamBuilder;
Expand Down Expand Up @@ -161,7 +162,7 @@ public EndOfStakingPeriodUpdater(
final var pendingRewards = (nodeInfo.stakeRewardStart() - nodeInfo.unclaimedStakeRewardStart())
/ HBARS_TO_TINYBARS
* nodeRewardRate;
final var newStakes = computeNewStakes(nodeInfo);
final var newStakes = computeNewStakes(nodeInfo, stakingConfig);
log.info(
"For node{}, the tb/hbar reward rate was {} for {} pending, with stake reward start {} -> {}",
nodeId,
Expand Down Expand Up @@ -251,6 +252,32 @@ public EndOfStakingPeriodUpdater(
.status(SUCCESS);
}

/**
 * Given a {@link ReadableStakingInfoStore}, computes the new weights for all nodes based on their stakes,
 * re-clamped to the {@code [minStake, maxStake]} range in the given {@link StakingConfig}.
 * Deleted nodes contribute nothing to the total stake and receive no entry in the result.
 * @param store the {@link ReadableStakingInfoStore} to compute the new weights for
 * @param stakingConfig the {@link StakingConfig} to use for the re-clamping
 * @return a map of node IDs to their new weights
 */
public static Map<Long, Long> computeReclampedStakeWeights(
        @NonNull final ReadableStakingInfoStore store, @NonNull final StakingConfig stakingConfig) {
    // First pass: accumulate the whole-network stake over all non-deleted nodes
    long wholeNetworkStake = 0;
    for (final long id : store.getAll()) {
        final var info = requireNonNull(store.get(id));
        if (!info.deleted()) {
            wholeNetworkStake += info.stake();
        }
    }
    // Second pass: scale each surviving node's stake into the consensus weight range
    final var consensusWeightSum = stakingConfig.sumOfConsensusWeights();
    final Map<Long, Long> newWeights = new HashMap<>();
    for (final long id : store.getAll()) {
        final var info = requireNonNull(store.get(id));
        if (info.deleted()) {
            continue;
        }
        final long scaledWeight = scaleStakeToWeight(info.stake(), wholeNetworkStake, consensusWeightSum);
        newWeights.put(id, scaledWeight);
    }
    return newWeights;
}

/**
* Scales up the weight of the node to the range [minStake, maxStakeOfAllNodes]
* from the consensus weight range [0, sumOfConsensusWeights].
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2023-2024 Hedera Hashgraph, LLC
* Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -255,15 +255,21 @@ public record StakeResult(long stake, long stakeRewardStart) {}
* current staking info. The new {@code stakeRewardStart} value is also computed
*
* @param stakingInfo the node's current staking info
* @param stakingConfig the staking configuration of the network
* @return the calculated {@link StakeResult}
*/
@NonNull
public static StakeResult computeNewStakes(@NonNull final StakingNodeInfo stakingInfo) {
public static StakeResult computeNewStakes(
@NonNull final StakingNodeInfo stakingInfo, @NonNull final StakingConfig stakingConfig) {
requireNonNull(stakingInfo);
requireNonNull(stakingConfig);
final var totalStake = stakingInfo.stakeToReward() + stakingInfo.stakeToNotReward();
final long newStake;
if (totalStake > stakingInfo.maxStake()) {
newStake = stakingInfo.maxStake();
} else if (totalStake < stakingInfo.minStake()) {
final long effectiveMax = Math.min(stakingInfo.maxStake(), stakingConfig.maxStake());
final long effectiveMin = Math.min(effectiveMax, Math.max(stakingInfo.minStake(), stakingConfig.minStake()));
if (totalStake > effectiveMax) {
newStake = effectiveMax;
} else if (totalStake < effectiveMin) {
newStake = 0;
} else {
newStake = totalStake;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
import com.hedera.node.app.service.token.impl.WritableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
import com.hedera.node.app.spi.workflows.record.StreamBuilder;
import com.hedera.node.config.data.LedgerConfig;
import com.hedera.node.config.data.StakingConfig;
import com.swirlds.config.api.Configuration;
import com.swirlds.state.lifecycle.info.NetworkInfo;
Expand Down Expand Up @@ -219,11 +218,9 @@ private void completeUpdateFromNewAddressBook(
@NonNull final WritableStakingInfoStore store,
@NonNull final List<NodeInfo> nodeInfos,
@NonNull final Configuration config) {
final var numberOfNodesInAddressBook = nodeInfos.size();
final long maxStakePerNode =
config.getConfigData(LedgerConfig.class).totalTinyBarFloat() / numberOfNodesInAddressBook;
final var numRewardHistoryStoredPeriods =
config.getConfigData(StakingConfig.class).rewardHistoryNumStoredPeriods();
final var stakingConfig = config.getConfigData(StakingConfig.class);
final var numRewardHistoryStoredPeriods = stakingConfig.rewardHistoryNumStoredPeriods();
final long maxStakePerNode = stakingConfig.maxStake();
for (final var nodeId : nodeInfos) {
final var stakingInfo = store.get(nodeId.nodeId());
if (stakingInfo != null) {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2023-2024 Hedera Hashgraph, LLC
* Copyright (C) 2023-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -17,8 +17,10 @@
package com.hedera.node.app.service.token.impl.test.handlers.staking;

import static com.hedera.node.app.service.token.impl.handlers.staking.EndOfStakingPeriodUtils.*;
import static com.hedera.node.app.service.token.impl.test.handlers.staking.StakeInfoHelperTest.DEFAULT_CONFIG;

import com.hedera.hapi.node.state.token.StakingNodeInfo;
import com.hedera.node.config.data.StakingConfig;
import java.math.BigInteger;
import java.util.Collections;
import java.util.List;
Expand All @@ -43,6 +45,7 @@ class EndOfStakingPeriodUtilsTest {
.rewardSumHistory(List.of(2L, 1L, 0L))
.weight(5)
.build();
private static final StakingConfig STAKING_CONFIG = DEFAULT_CONFIG.getConfigData(StakingConfig.class);

@Test
void readableNonZeroHistoryFromEmptyRewards() {
Expand Down Expand Up @@ -149,15 +152,16 @@ void calculatesUpdatedSumHistoryAsExpectedForNodeWithLessThanMinStakeWhenMinIsNo
@SuppressWarnings("DataFlowIssue")
@Test
void computeStakeNullArg() {
    // A null staking info must be rejected eagerly with an NPE
    Assertions.assertThatExceptionOfType(NullPointerException.class)
            .isThrownBy(() -> computeNewStakes(null, STAKING_CONFIG));
}

@Test
void computeStakeTotalStakeGreaterThanMaxStake() {
    // Cap is set just below the node's total stake, so the new stake must clamp to it
    final long cappedMaxStake = STAKE_TO_REWARD + STAKE_TO_NOT_REWARD - 1;
    final var stakingInfo = STAKING_INFO.copyBuilder().maxStake(cappedMaxStake).build();

    final var newStakes = computeNewStakes(stakingInfo, STAKING_CONFIG);
    Assertions.assertThat(newStakes.stake()).isEqualTo(cappedMaxStake);
    Assertions.assertThat(newStakes.stakeRewardStart()).isEqualTo(STAKE_TO_REWARD);
}
Expand All @@ -169,7 +173,7 @@ void computeStakeTotalStakeLessThanMinStake() {
.minStake(STAKE_TO_REWARD + STAKE_TO_NOT_REWARD + 1)
.build();

final var result = computeNewStakes(input);
final var result = computeNewStakes(input, STAKING_CONFIG);
Assertions.assertThat(result.stake()).isZero();
Assertions.assertThat(result.stakeRewardStart()).isEqualTo(STAKE_TO_REWARD);
}
Expand All @@ -182,7 +186,7 @@ void computeStakeTotalStakeInBetweenMinStakeAndMaxStake() {
.maxStake(STAKE_TO_REWARD + STAKE_TO_NOT_REWARD + 1)
.build();

final var result = computeNewStakes(input);
final var result = computeNewStakes(input, STAKING_CONFIG);
Assertions.assertThat(result.stake()).isEqualTo(STAKE_TO_REWARD + STAKE_TO_NOT_REWARD);
Assertions.assertThat(result.stakeRewardStart()).isEqualTo(STAKE_TO_REWARD);
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2024 Hedera Hashgraph, LLC
* Copyright (C) 2024-2025 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -36,6 +36,7 @@
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

Expand All @@ -57,6 +58,12 @@ public enum PlatformStateService implements Service {
*/
@Deprecated
private static final AtomicReference<AddressBook> DISK_ADDRESS_BOOK = new AtomicReference<>();

/**
* Temporary access to the re-clamped stake weights used in 0.58 upgrade specifically.
*/
private static final AtomicReference<Map<Long, Long>> RECLAMPED_STAKE_WEIGHTS = new AtomicReference<>();

/**
* The schemas to register with the {@link SchemaRegistry}.
*/
Expand All @@ -65,6 +72,7 @@ public enum PlatformStateService implements Service {
.apply(config)),
new V058RosterLifecycleTransitionSchema(
DISK_ADDRESS_BOOK::get,
RECLAMPED_STAKE_WEIGHTS::get,
config -> requireNonNull(APP_VERSION_FN.get()).apply(config),
WritablePlatformStateStore::new));

Expand Down Expand Up @@ -104,6 +112,20 @@ public void clearDiskAddressBook() {
DISK_ADDRESS_BOOK.set(null);
}

/**
 * Sets the re-clamped stake weights to the given map.
 * @param reclampedStakeWeights the node id to re-clamped weight map to expose; must not be null
 */
public void setReclampedStakeWeights(@NonNull final Map<Long, Long> reclampedStakeWeights) {
    requireNonNull(reclampedStakeWeights);
    RECLAMPED_STAKE_WEIGHTS.set(reclampedStakeWeights);
}

/**
 * Clears the re-clamped stake weights.
 * <p>Intended to be called once schema migrations complete, so this temporary static
 * reference does not outlive the upgrade window it exists for.
 */
public void clearReclampedStakeWeights() {
    RECLAMPED_STAKE_WEIGHTS.set(null);
}

/**
* Given a {@link MerkleStateRoot}, returns the creation version of the platform state if it exists.
* @param root the root to extract the creation version from
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,11 @@
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
* A restart-only schema to ensure the platform state has its active and previous
Expand All @@ -47,21 +50,26 @@
*/
@Deprecated
public class V058RosterLifecycleTransitionSchema extends Schema {
private static final Logger logger = LogManager.getLogger(V058RosterLifecycleTransitionSchema.class);

private static final SemanticVersion VERSION =
SemanticVersion.newBuilder().major(0).minor(58).build();

private final Supplier<AddressBook> addressBook;
private final Supplier<Map<Long, Long>> reclampedStakeWeightsSupplier;
private final Function<Configuration, SoftwareVersion> appVersionFn;
private final Function<WritableStates, WritablePlatformStateStore> platformStateStoreFn;

/**
 * Creates the schema with the suppliers and factories it needs at restart time.
 * @param addressBook supplies the disk address book, if one was set
 * @param reclampedStakeWeightsSupplier supplies the re-clamped stake weights, if set
 * @param appVersionFn maps configuration to the current software version
 * @param platformStateStoreFn creates a writable platform state store from writable states
 */
public V058RosterLifecycleTransitionSchema(
        @NonNull final Supplier<AddressBook> addressBook,
        @NonNull final Supplier<Map<Long, Long>> reclampedStakeWeightsSupplier,
        @NonNull final Function<Configuration, SoftwareVersion> appVersionFn,
        @NonNull final Function<WritableStates, WritablePlatformStateStore> platformStateStoreFn) {
    super(VERSION);
    // Assign in declaration order for readability
    this.addressBook = requireNonNull(addressBook);
    this.reclampedStakeWeightsSupplier = requireNonNull(reclampedStakeWeightsSupplier);
    this.appVersionFn = requireNonNull(appVersionFn);
    this.platformStateStoreFn = requireNonNull(platformStateStoreFn);
}

@Override
Expand Down Expand Up @@ -94,12 +102,33 @@ public void restart(@NonNull final MigrationContext ctx) {
final var nextBook = isOverride ? withExtantNodeWeights(diskBook, currentBook) : diskBook;
stateStore.bulkUpdate(v -> {
v.setPreviousAddressBook(currentBook == null ? null : currentBook.copy());
v.setAddressBook(nextBook);
final var updatedNextBook =
withReclampedStakeWeights(nextBook, reclampedStakeWeightsSupplier.get());
v.setAddressBook(updatedNextBook);
});
}
}
}

/**
 * Returns a copy of the given address book in which each address's weight is replaced by the
 * re-clamped stake weight for its node id; a node with no entry in the map gets weight zero.
 * Each adjustment is logged at INFO level.
 * @param book the address book whose weights should be replaced
 * @param reclampedStakeWeights map from node id to its re-clamped stake weight
 * @return a new {@link AddressBook} with the updated weights
 */
private AddressBook withReclampedStakeWeights(
        @NonNull final AddressBook book, @NonNull final Map<Long, Long> reclampedStakeWeights) {
    final List<Address> addresses = new ArrayList<>();
    for (final var address : book) {
        // Missing entries (e.g. deleted nodes) default to zero weight
        final long newWeight =
                reclampedStakeWeights.getOrDefault(address.getNodeId().id(), 0L);
        logger.info(
                "Adjusting re-clamped stake weight for node{} from {} to {}",
                address.getNodeId().id(),
                address.getWeight(),
                newWeight);
        addresses.add(address.copySetWeight(newWeight));
    }
    return new AddressBook(addresses);
}

/**
* If there are weights to copy to the first address book from the (possibly null) second address book,
* then does so and returns a new address book with the result.
Expand Down

0 comments on commit 9dc9464

Please sign in to comment.