From 290a13edc3a93a4910b39420d3b6271699090170 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 22 Jul 2025 14:57:32 -0400 Subject: [PATCH 01/33] Add v11 generate and tests as clones of v9_v10 --- shared/services/rewards/generator-impl-v11.go | 1309 +++++++++++++++++ shared/services/rewards/mock_v11_test.go | 859 +++++++++++ 2 files changed, 2168 insertions(+) create mode 100644 shared/services/rewards/generator-impl-v11.go create mode 100644 shared/services/rewards/mock_v11_test.go diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go new file mode 100644 index 000000000..708858825 --- /dev/null +++ b/shared/services/rewards/generator-impl-v11.go @@ -0,0 +1,1309 @@ +package rewards + +import ( + "context" + "fmt" + "math/big" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ipfs/go-cid" + "github.com/rocket-pool/smartnode/bindings/rewards" + rptypes "github.com/rocket-pool/smartnode/bindings/types" + "github.com/rocket-pool/smartnode/bindings/utils/eth" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/config" + "github.com/rocket-pool/smartnode/shared/services/rewards/fees" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types" + sszbig "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" + "github.com/rocket-pool/smartnode/shared/services/state" + "github.com/rocket-pool/smartnode/shared/utils/log" + "golang.org/x/sync/errgroup" +) + +// Type assertion to ensure SSZFile_v1 is IRewardsFile +var _ IRewardsFile = (*ssz_types.SSZFile_v1)(nil) + +// Implementation for tree generator ruleset v11 +type treeGeneratorImpl_v11 struct { + networkState *state.NetworkState + rewardsFile *ssz_types.SSZFile_v1 + elSnapshotHeader *types.Header + snapshotEnd *SnapshotEnd + log 
*log.ColorLogger + logPrefix string + rp RewardsExecutionClient + previousRewardsPoolAddresses []common.Address + bc RewardsBeaconClient + opts *bind.CallOpts + nodeDetails []*NodeSmoothingDetails + smoothingPoolBalance *big.Int + intervalDutiesInfo *IntervalDutiesInfo + slotsPerEpoch uint64 + validatorIndexMap map[string]*MinipoolInfo + elStartTime time.Time + elEndTime time.Time + validNetworkCache map[uint64]bool + epsilon *big.Int + intervalSeconds *big.Int + beaconConfig beacon.Eth2Config + validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus + totalAttestationScore *big.Int + successfulAttestations uint64 + genesisTime time.Time + invalidNetworkNodes map[common.Address]uint64 + minipoolPerformanceFile *MinipoolPerformanceFile_v2 + nodeRewards map[common.Address]*ssz_types.NodeReward + networkRewards map[ssz_types.Layer]*ssz_types.NetworkReward + + // fields for RPIP-62 bonus calculations + // Withdrawals made by a minipool's validator. + minipoolWithdrawals map[common.Address]*big.Int +} + +// Create a new tree generator +func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) *treeGeneratorImpl_v11 { + return &treeGeneratorImpl_v11{ + rewardsFile: &ssz_types.SSZFile_v1{ + RewardsFileVersion: 3, + RulesetVersion: 11, + Index: index, + IntervalsPassed: intervalsPassed, + TotalRewards: &ssz_types.TotalRewards{ + ProtocolDaoRpl: sszbig.NewUint256(0), + TotalCollateralRpl: sszbig.NewUint256(0), + TotalOracleDaoRpl: sszbig.NewUint256(0), + TotalSmoothingPoolEth: sszbig.NewUint256(0), + PoolStakerSmoothingPoolEth: sszbig.NewUint256(0), + NodeOperatorSmoothingPoolEth: sszbig.NewUint256(0), + TotalNodeWeight: sszbig.NewUint256(0), + }, + NetworkRewards: ssz_types.NetworkRewards{}, + NodeRewards: ssz_types.NodeRewards{}, + }, + validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, + 
validatorIndexMap: map[string]*MinipoolInfo{}, + elSnapshotHeader: elSnapshotHeader, + snapshotEnd: snapshotEnd, + log: log, + logPrefix: logPrefix, + totalAttestationScore: big.NewInt(0), + networkState: state, + invalidNetworkNodes: map[common.Address]uint64{}, + minipoolPerformanceFile: &MinipoolPerformanceFile_v2{ + Index: index, + MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, + }, + nodeRewards: map[common.Address]*ssz_types.NodeReward{}, + networkRewards: map[ssz_types.Layer]*ssz_types.NetworkReward{}, + minipoolWithdrawals: map[common.Address]*big.Int{}, + } +} + +// Get the version of the ruleset used by this generator +func (r *treeGeneratorImpl_v11) getRulesetVersion() uint64 { + return r.rewardsFile.RulesetVersion +} + +func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) { + + r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) + + // Provision some struct params + r.rp = rp + r.previousRewardsPoolAddresses = previousRewardsPoolAddresses + r.bc = bc + r.validNetworkCache = map[uint64]bool{ + 0: true, + } + + // Set the network name + r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName) + r.minipoolPerformanceFile.Network = networkName + r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + + // Get the Beacon config + r.beaconConfig = r.networkState.BeaconConfig + r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch + r.genesisTime = time.Unix(int64(r.beaconConfig.GenesisTime), 0) + + // Set the EL client call opts + r.opts = &bind.CallOpts{ + BlockNumber: r.elSnapshotHeader.Number, + } + + r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) + + // Get the max of node count and minipool 
count - this will be used for an error epsilon due to division truncation + nodeCount := len(r.networkState.NodeDetails) + minipoolCount := len(r.networkState.MinipoolDetails) + if nodeCount > minipoolCount { + r.epsilon = big.NewInt(int64(nodeCount)) + } else { + r.epsilon = big.NewInt(int64(minipoolCount)) + } + + // Calculate the RPL rewards + err := r.calculateRplRewards() + if err != nil { + return nil, fmt.Errorf("error calculating RPL rewards: %w", err) + } + + // Calculate the ETH rewards + err = r.calculateEthRewards(true) + if err != nil { + return nil, fmt.Errorf("error calculating ETH rewards: %w", err) + } + + // Sort and assign the maps to the ssz file lists + for nodeAddress, nodeReward := range r.nodeRewards { + copy(nodeReward.Address[:], nodeAddress[:]) + r.rewardsFile.NodeRewards = append(r.rewardsFile.NodeRewards, nodeReward) + } + + for layer, networkReward := range r.networkRewards { + networkReward.Network = layer + r.rewardsFile.NetworkRewards = append(r.rewardsFile.NetworkRewards, networkReward) + } + + // Generate the Merkle Tree + err = r.rewardsFile.GenerateMerkleTree() + if err != nil { + return nil, fmt.Errorf("error generating Merkle tree: %w", err) + } + + // Sort all of the missed attestations so the files are always generated in the same state + for _, minipoolInfo := range r.minipoolPerformanceFile.MinipoolPerformance { + sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool { + return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j] + }) + } + + return &GenerateTreeResult{ + RewardsFile: r.rewardsFile, + InvalidNetworkNodes: r.invalidNetworkNodes, + MinipoolPerformanceFile: r.minipoolPerformanceFile, + }, nil + +} + +// Quickly calculates an approximate of the staker's share of the smoothing pool balance without processing Beacon performance +// Used for approximate returns in the rETH ratio update +func (r *treeGeneratorImpl_v11) approximateStakerShareOfSmoothingPool(rp 
RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) { + r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) + + r.rp = rp + r.bc = bc + r.validNetworkCache = map[uint64]bool{ + 0: true, + } + + // Set the network name + r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName) + r.minipoolPerformanceFile.Network = networkName + r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + + // Get the Beacon config + r.beaconConfig = r.networkState.BeaconConfig + r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch + r.genesisTime = time.Unix(int64(r.beaconConfig.GenesisTime), 0) + + // Set the EL client call opts + r.opts = &bind.CallOpts{ + BlockNumber: r.elSnapshotHeader.Number, + } + + r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) + + // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation + nodeCount := len(r.networkState.NodeDetails) + minipoolCount := len(r.networkState.MinipoolDetails) + if nodeCount > minipoolCount { + r.epsilon = big.NewInt(int64(nodeCount)) + } else { + r.epsilon = big.NewInt(int64(minipoolCount)) + } + + // Calculate the ETH rewards + err := r.calculateEthRewards(false) + if err != nil { + return nil, fmt.Errorf("error calculating ETH rewards: %w", err) + } + + return r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int, nil +} + +func (r *treeGeneratorImpl_v11) calculateNodeRplRewards( + collateralRewards *big.Int, + nodeWeight *big.Int, + totalNodeWeight *big.Int, +) *big.Int { + + if nodeWeight.Sign() <= 0 { + return big.NewInt(0) + } + + // (collateralRewards * nodeWeight / totalNodeWeight) + rpip30Rewards := big.NewInt(0).Mul(collateralRewards, nodeWeight) + rpip30Rewards.Quo(rpip30Rewards, totalNodeWeight) + + return rpip30Rewards +} + +// 
Calculates the RPL rewards for the given interval +func (r *treeGeneratorImpl_v11) calculateRplRewards() error { + pendingRewards := r.networkState.NetworkDetails.PendingRPLRewards + r.log.Printlnf("%s Pending RPL rewards: %s (%.3f)", r.logPrefix, pendingRewards.String(), eth.WeiToEth(pendingRewards)) + if pendingRewards.Cmp(common.Big0) == 0 { + return fmt.Errorf("there are no pending RPL rewards, so this interval cannot be used for rewards submission") + } + + // Get baseline Protocol DAO rewards + pDaoPercent := r.networkState.NetworkDetails.ProtocolDaoRewardsPercent + pDaoRewards := big.NewInt(0) + pDaoRewards.Mul(pendingRewards, pDaoPercent) + pDaoRewards.Div(pDaoRewards, oneEth) + r.log.Printlnf("%s Expected Protocol DAO rewards: %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(pDaoRewards)) + + // Get node operator rewards + nodeOpPercent := r.networkState.NetworkDetails.NodeOperatorRewardsPercent + totalNodeRewards := big.NewInt(0) + totalNodeRewards.Mul(pendingRewards, nodeOpPercent) + totalNodeRewards.Div(totalNodeRewards, oneEth) + r.log.Printlnf("%s Approx. 
total collateral RPL rewards: %s (%.3f)", r.logPrefix, totalNodeRewards.String(), eth.WeiToEth(totalNodeRewards)) + + // Calculate the RPIP-30 weight of each node, scaling by their participation in this interval + nodeWeights, totalNodeWeight, err := r.networkState.CalculateNodeWeights() + if err != nil { + return fmt.Errorf("error calculating node weights: %w", err) + } + + // Operate normally if any node has rewards + if totalNodeWeight.Sign() > 0 { + // Make sure to record totalNodeWeight in the rewards file + r.rewardsFile.TotalRewards.TotalNodeWeight.Set(totalNodeWeight) + + r.log.Printlnf("%s Calculating individual collateral rewards...", r.logPrefix) + for i, nodeDetails := range r.networkState.NodeDetails { + // Get how much RPL goes to this node + nodeRplRewards := r.calculateNodeRplRewards( + totalNodeRewards, + nodeWeights[nodeDetails.NodeAddress], + totalNodeWeight, + ) + + // If there are pending rewards, add it to the map + if nodeRplRewards.Sign() == 1 { + rewardsForNode, exists := r.nodeRewards[nodeDetails.NodeAddress] + if !exists { + // Get the network the rewards should go to + network := r.networkState.NodeDetails[i].RewardNetwork.Uint64() + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(nodeDetails.NodeAddress.Bytes()), + ) + r.nodeRewards[nodeDetails.NodeAddress] = rewardsForNode + } + rewardsForNode.CollateralRpl.Add(rewardsForNode.CollateralRpl.Int, nodeRplRewards) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.CollateralRpl.Int.Add(rewardsForNetwork.CollateralRpl.Int, nodeRplRewards) + } + } + + // Sanity check to 
make sure we arrived at the correct total + delta := big.NewInt(0) + totalCalculatedNodeRewards := big.NewInt(0) + for _, networkRewards := range r.networkRewards { + totalCalculatedNodeRewards.Add(totalCalculatedNodeRewards, networkRewards.CollateralRpl.Int) + } + delta.Sub(totalNodeRewards, totalCalculatedNodeRewards).Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return fmt.Errorf("error calculating collateral RPL: total was %s, but expected %s; error was too large", totalCalculatedNodeRewards.String(), totalNodeRewards.String()) + } + r.rewardsFile.TotalRewards.TotalCollateralRpl.Int.Set(totalCalculatedNodeRewards) + r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedNodeRewards.String(), delta.String()) + pDaoRewards.Sub(pendingRewards, totalCalculatedNodeRewards) + } else { + // In this situation, none of the nodes in the network had eligible rewards so send it all to the pDAO + pDaoRewards.Add(pDaoRewards, totalNodeRewards) + r.log.Printlnf("%s None of the nodes were eligible for collateral rewards, sending everything to the pDAO; now at %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(pDaoRewards)) + } + + // Handle Oracle DAO rewards + oDaoPercent := r.networkState.NetworkDetails.TrustedNodeOperatorRewardsPercent + totalODaoRewards := big.NewInt(0) + totalODaoRewards.Mul(pendingRewards, oDaoPercent) + totalODaoRewards.Div(totalODaoRewards, oneEth) + r.log.Printlnf("%s Total Oracle DAO RPL rewards: %s (%.3f)", r.logPrefix, totalODaoRewards.String(), eth.WeiToEth(totalODaoRewards)) + + oDaoDetails := r.networkState.OracleDaoMemberDetails + + // Calculate the true effective time of each oDAO node based on their participation in this interval + totalODaoNodeTime := big.NewInt(0) + trueODaoNodeTimes := map[common.Address]*big.Int{} + for _, details := range oDaoDetails { + // Get the timestamp of the node joining the oDAO + joinTime := details.JoinedTime + + // Get the actual effective time, scaled based on 
participation + intervalDuration := r.networkState.NetworkDetails.IntervalDuration + intervalDurationBig := big.NewInt(int64(intervalDuration.Seconds())) + participationTime := big.NewInt(0).Set(intervalDurationBig) + snapshotBlockTime := time.Unix(int64(r.elSnapshotHeader.Time), 0) + eligibleDuration := snapshotBlockTime.Sub(joinTime) + if eligibleDuration < intervalDuration { + participationTime = big.NewInt(int64(eligibleDuration.Seconds())) + } + trueODaoNodeTimes[details.Address] = participationTime + + // Add it to the total + totalODaoNodeTime.Add(totalODaoNodeTime, participationTime) + } + + for _, details := range oDaoDetails { + address := details.Address + + // Calculate the oDAO rewards for the node: (participation time) * (total oDAO rewards) / (total participation time) + individualOdaoRewards := big.NewInt(0) + individualOdaoRewards.Mul(trueODaoNodeTimes[address], totalODaoRewards) + individualOdaoRewards.Div(individualOdaoRewards, totalODaoNodeTime) + + rewardsForNode, exists := r.nodeRewards[address] + if !exists { + // Get the network the rewards should go to + network := r.networkState.NodeDetailsByAddress[address].RewardNetwork.Uint64() + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(address.Bytes()), + ) + r.nodeRewards[address] = rewardsForNode + + } + rewardsForNode.OracleDaoRpl.Add(rewardsForNode.OracleDaoRpl.Int, individualOdaoRewards) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.OracleDaoRpl.Add(rewardsForNetwork.OracleDaoRpl.Int, individualOdaoRewards) + } + + // Sanity 
check to make sure we arrived at the correct total + totalCalculatedOdaoRewards := big.NewInt(0) + delta := big.NewInt(0) + for _, networkRewards := range r.networkRewards { + totalCalculatedOdaoRewards.Add(totalCalculatedOdaoRewards, networkRewards.OracleDaoRpl.Int) + } + delta.Sub(totalODaoRewards, totalCalculatedOdaoRewards).Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return fmt.Errorf("error calculating ODao RPL: total was %s, but expected %s; error was too large", totalCalculatedOdaoRewards.String(), totalODaoRewards.String()) + } + r.rewardsFile.TotalRewards.TotalOracleDaoRpl.Int.Set(totalCalculatedOdaoRewards) + r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedOdaoRewards.String(), delta.String()) + + // Get actual protocol DAO rewards + pDaoRewards.Sub(pDaoRewards, totalCalculatedOdaoRewards) + r.rewardsFile.TotalRewards.ProtocolDaoRpl = sszbig.NewUint256(0) + r.rewardsFile.TotalRewards.ProtocolDaoRpl.Set(pDaoRewards) + r.log.Printlnf("%s Actual Protocol DAO rewards: %s to account for truncation", r.logPrefix, pDaoRewards.String()) + + // Print total node weight + r.log.Printlnf("%s Total Node Weight: %s", r.logPrefix, totalNodeWeight) + + return nil + +} + +// Calculates the ETH rewards for the given interval +func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) error { + + // Get the Smoothing Pool contract's balance + r.smoothingPoolBalance = r.networkState.NetworkDetails.SmoothingPoolBalance + r.log.Printlnf("%s Smoothing Pool Balance: %s (%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) + + // Ignore the ETH calculation if there are no rewards + if r.smoothingPoolBalance.Cmp(common.Big0) == 0 { + return nil + } + + if r.rewardsFile.Index == 0 { + // This is the first interval, Smoothing Pool rewards are ignored on the first interval since it doesn't have a discrete start time + return nil + } + + // Get the start time of this interval based 
on the event from the previous one + //previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, r.opts) // This is immutable so querying at the head is fine and mitigates issues around calls for pruned EL state + previousIntervalEvent, err := r.rp.GetRewardSnapshotEvent(r.previousRewardsPoolAddresses, r.rewardsFile.Index-1, r.opts) + if err != nil { + return err + } + startElBlockHeader, err := r.getBlocksAndTimesForInterval(previousIntervalEvent) + if err != nil { + return err + } + + r.elStartTime = time.Unix(int64(startElBlockHeader.Time), 0) + r.elEndTime = time.Unix(int64(r.elSnapshotHeader.Time), 0) + r.intervalSeconds = big.NewInt(int64(r.elEndTime.Sub(r.elStartTime) / time.Second)) + + // Get the details for nodes eligible for Smoothing Pool rewards + // This should be all of the eth1 calls, so do them all at the start of Smoothing Pool calculation to prevent the need for an archive node during normal operations + err = r.getSmoothingPoolNodeDetails() + if err != nil { + return err + } + eligible := 0 + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.IsEligible { + eligible++ + } + } + r.log.Printlnf("%s %d / %d nodes were eligible for Smoothing Pool rewards", r.logPrefix, eligible, len(r.nodeDetails)) + + // Process the attestation performance for each minipool during this interval + r.intervalDutiesInfo = &IntervalDutiesInfo{ + Index: r.rewardsFile.Index, + Slots: map[uint64]*SlotInfo{}, + } + if checkBeaconPerformance { + err = r.processAttestationsBalancesAndWithdrawalsForInterval() + if err != nil { + return err + } + } else { + // Attestation processing is disabled, just give each minipool 1 good attestation and complete slot activity so they're all scored the same + // Used for approximating rETH's share during balances calculation + validatorReq := big.NewInt(0).Set(thirtyTwoEth) + for _, nodeInfo := range r.nodeDetails { + // Check if the node is currently opted in for simplicity + if nodeInfo.IsEligible && 
nodeInfo.IsOptedIn && r.elEndTime.After(nodeInfo.OptInTime) { + eligibleBorrowedEth := nodeInfo.EligibleBorrowedEth + _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeInfo.RplStake) + for _, minipool := range nodeInfo.Minipools { + minipool.CompletedAttestations = map[uint64]bool{0: true} + + // Make up an attestation + details := r.networkState.MinipoolDetailsByAddress[minipool.Address] + bond, fee := details.GetMinipoolBondAndNodeFee(r.elEndTime) + if r.rewardsFile.RulesetVersion >= 10 { + fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + } + minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee + minipoolScore.Mul(minipoolScore, bond) // Multiply by bond + minipoolScore.Div(minipoolScore, validatorReq) // Divide by 32 to get the bond as a fraction of a total validator + minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the minipool's score and the total score + minipool.AttestationScore.Add(&minipool.AttestationScore.Int, minipoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) + + r.successfulAttestations++ + } + } + } + } + + // Determine how much ETH each node gets and how much the pool stakers get + poolStakerETH, nodeOpEth, bonusScalar, err := r.calculateNodeRewards() + if err != nil { + return err + } + if r.rewardsFile.RulesetVersion >= 10 { + r.minipoolPerformanceFile.BonusScalar = QuotedBigIntFromBigInt(bonusScalar) + } + + // Update the rewards maps + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.IsEligible && nodeInfo.SmoothingPoolEth.Cmp(common.Big0) > 0 { + rewardsForNode, exists := r.nodeRewards[nodeInfo.Address] + if !exists { + network := nodeInfo.RewardsNetwork + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[nodeInfo.Address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + 
network, + ssz_types.AddressFromBytes(nodeInfo.Address.Bytes()), + ) + r.nodeRewards[nodeInfo.Address] = rewardsForNode + } + rewardsForNode.SmoothingPoolEth.Add(rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + + // Add minipool rewards to the JSON + for _, minipoolInfo := range nodeInfo.Minipools { + successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) + missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) + performance := &SmoothingPoolMinipoolPerformance_v2{ + Pubkey: minipoolInfo.ValidatorPubkey.Hex(), + SuccessfulAttestations: successfulAttestations, + MissedAttestations: missingAttestations, + AttestationScore: minipoolInfo.AttestationScore, + EthEarned: QuotedBigIntFromBigInt(minipoolInfo.MinipoolShare), + BonusEthEarned: QuotedBigIntFromBigInt(minipoolInfo.MinipoolBonus), + ConsensusIncome: minipoolInfo.ConsensusIncome, + EffectiveCommission: QuotedBigIntFromBigInt(minipoolInfo.TotalFee), + MissingAttestationSlots: []uint64{}, + } + if successfulAttestations+missingAttestations == 0 { + // Don't include minipools that have zero attestations + continue + } + for slot := range minipoolInfo.MissingAttestationSlots { + performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) + } + r.minipoolPerformanceFile.MinipoolPerformance[minipoolInfo.Address] = performance + } + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.SmoothingPoolEth.Add(rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + } + } + + // Set the totals + r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Set(poolStakerETH) + r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Set(nodeOpEth) + 
r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Set(r.smoothingPoolBalance) + return nil + +} + +func (r *treeGeneratorImpl_v11) calculateNodeBonuses() (*big.Int, error) { + totalConsensusBonus := big.NewInt(0) + for _, nsd := range r.nodeDetails { + if !nsd.IsEligible { + continue + } + + nodeDetails := r.networkState.NodeDetailsByAddress[nsd.Address] + eligible, _, eligibleEnd := nodeDetails.IsEligibleForBonuses(r.elStartTime, r.elEndTime) + if !eligible { + continue + } + + // Get the nodeDetails from the network state + eligibleBorrowedEth := nsd.EligibleBorrowedEth + _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nsd.RplStake) + for _, mpd := range nsd.Minipools { + mpi := r.networkState.MinipoolDetailsByAddress[mpd.Address] + if !mpi.IsEligibleForBonuses(eligibleEnd) { + continue + } + bond, fee := mpi.GetMinipoolBondAndNodeFee(eligibleEnd) + feeWithBonus := fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + if fee.Cmp(feeWithBonus) >= 0 { + // This minipool won't get any bonuses, so skip it + continue + } + // This minipool will get a bonus + // It is safe to populate the optional fields from here on. 
+ + fee = feeWithBonus + // Save fee as totalFee for the Minipool + mpd.TotalFee = fee + + // Total fee for a minipool with a bonus shall never exceed 14% + if fee.Cmp(fourteenPercentEth) > 0 { + r.log.Printlnf("WARNING: Minipool %s has a fee of %s, which is greater than the maximum allowed of 14%", mpd.Address.Hex(), fee.String()) + r.log.Printlnf("WARNING: Aborting.") + return nil, fmt.Errorf("minipool %s has a fee of %s, which is greater than the maximum allowed of 14%%", mpd.Address.Hex(), fee.String()) + } + bonusFee := big.NewInt(0).Set(fee) + bonusFee.Sub(bonusFee, mpi.NodeFee) + withdrawalTotal := r.minipoolWithdrawals[mpd.Address] + if withdrawalTotal == nil { + withdrawalTotal = big.NewInt(0) + } + consensusIncome := big.NewInt(0).Set(withdrawalTotal) + mpd.ConsensusIncome = &QuotedBigInt{Int: *(big.NewInt(0).Set(consensusIncome))} + bonusShare := bonusFee.Mul(bonusFee, big.NewInt(0).Sub(thirtyTwoEth, mpi.NodeDepositBalance)) + bonusShare.Div(bonusShare, thirtyTwoEth) + minipoolBonus := consensusIncome.Mul(consensusIncome, bonusShare) + minipoolBonus.Div(minipoolBonus, oneEth) + if minipoolBonus.Sign() == -1 { + minipoolBonus = big.NewInt(0) + } + mpd.MinipoolBonus = minipoolBonus + totalConsensusBonus.Add(totalConsensusBonus, minipoolBonus) + nsd.BonusEth.Add(nsd.BonusEth, minipoolBonus) + } + } + return totalConsensusBonus, nil +} + +// Calculate the distribution of Smoothing Pool ETH to each node +func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*big.Int, *big.Int, *big.Int, error) { + var err error + bonusScalar := big.NewInt(0).Set(oneEth) + + // If there weren't any successful attestations, everything goes to the pool stakers + if r.totalAttestationScore.Cmp(common.Big0) == 0 || r.successfulAttestations == 0 { + r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... 
sending the whole smoothing pool balance to the pool stakers.", r.totalAttestationScore.String(), r.successfulAttestations) + return r.smoothingPoolBalance, big.NewInt(0), bonusScalar, nil + } + + // Calculate the minipool bonuses + isEligibleInterval := true // TODO - check on-chain for saturn 1 + var totalConsensusBonus *big.Int + if r.rewardsFile.RulesetVersion >= 10 && isEligibleInterval { + totalConsensusBonus, err = r.calculateNodeBonuses() + if err != nil { + return nil, nil, nil, err + } + } + + totalEthForMinipools := big.NewInt(0) + totalNodeOpShare := big.NewInt(0) + totalNodeOpShare.Mul(r.smoothingPoolBalance, r.totalAttestationScore) + totalNodeOpShare.Div(totalNodeOpShare, big.NewInt(int64(r.successfulAttestations))) + totalNodeOpShare.Div(totalNodeOpShare, oneEth) + + for _, nodeInfo := range r.nodeDetails { + nodeInfo.SmoothingPoolEth = big.NewInt(0) + if !nodeInfo.IsEligible { + continue + } + for _, minipool := range nodeInfo.Minipools { + if len(minipool.CompletedAttestations)+len(minipool.MissingAttestationSlots) == 0 || !minipool.WasActive { + // Ignore minipools that weren't active for the interval + minipool.WasActive = false + minipool.MinipoolShare = big.NewInt(0) + continue + } + + minipoolEth := big.NewInt(0).Set(totalNodeOpShare) + minipoolEth.Mul(minipoolEth, &minipool.AttestationScore.Int) + minipoolEth.Div(minipoolEth, r.totalAttestationScore) + minipool.MinipoolShare = minipoolEth + nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, minipoolEth) + } + totalEthForMinipools.Add(totalEthForMinipools, nodeInfo.SmoothingPoolEth) + } + + if r.rewardsFile.RulesetVersion >= 10 { + remainingBalance := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + if remainingBalance.Cmp(totalConsensusBonus) < 0 { + r.log.Printlnf("WARNING: Remaining balance is less than total consensus bonus... 
Balance = %s, total consensus bonus = %s", remainingBalance.String(), totalConsensusBonus.String()) + // Scale bonuses down to fit the remaining balance + bonusScalar.Div(big.NewInt(0).Mul(remainingBalance, oneEth), totalConsensusBonus) + for _, nsd := range r.nodeDetails { + nsd.BonusEth.Mul(nsd.BonusEth, remainingBalance) + nsd.BonusEth.Div(nsd.BonusEth, totalConsensusBonus) + // Calculate the reduced bonus for each minipool + // Because of integer division, this will be less than the actual bonus by up to 1 wei + for _, mpd := range nsd.Minipools { + if mpd.MinipoolBonus == nil { + continue + } + mpd.MinipoolBonus.Mul(mpd.MinipoolBonus, remainingBalance) + mpd.MinipoolBonus.Div(mpd.MinipoolBonus, totalConsensusBonus) + } + } + } + } + + // Sanity check the totalNodeOpShare before bonuses are awarded + delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare) + delta.Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return nil, nil, nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String()) + } + + // Finally, award the bonuses + if r.rewardsFile.RulesetVersion >= 10 { + for _, nsd := range r.nodeDetails { + nsd.SmoothingPoolEth.Add(nsd.SmoothingPoolEth, nsd.BonusEth) + totalEthForMinipools.Add(totalEthForMinipools, nsd.BonusEth) + } + } + + // This is how much actually goes to the pool stakers - it should ideally be equal to poolStakerShare but this accounts for any cumulative floating point errors + truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + + // Calculate the staking pool share and the node op share + poolStakerShareBeforeBonuses := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare) + + r.log.Printlnf("%s Pool staker ETH before bonuses: %s (%.3f)", r.logPrefix, poolStakerShareBeforeBonuses.String(), eth.WeiToEth(poolStakerShareBeforeBonuses)) + r.log.Printlnf("%s Pool 
staker ETH after bonuses: %s (%.3f)", r.logPrefix, truePoolStakerAmount.String(), eth.WeiToEth(truePoolStakerAmount))
+ r.log.Printlnf("%s Node Op ETH before bonuses: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare))
+ r.log.Printlnf("%s Node Op ETH after bonuses: %s (%.3f)", r.logPrefix, totalEthForMinipools.String(), eth.WeiToEth(totalEthForMinipools))
+ r.log.Printlnf("%s (error = %s wei)", r.logPrefix, delta.String())
+ r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String())
+
+ return truePoolStakerAmount, totalEthForMinipools, bonusScalar, nil
+
+}
+
+// Get all of the duties for a range of epochs, checking each minipool's
+// attestation performance along the way (and, for ruleset v10+, accumulating
+// validator withdrawals via processEpoch).
+func (r *treeGeneratorImpl_v11) processAttestationsBalancesAndWithdrawalsForInterval() error {
+
+ startEpoch := r.rewardsFile.ConsensusStartBlock / r.beaconConfig.SlotsPerEpoch
+ endEpoch := r.rewardsFile.ConsensusEndBlock / r.beaconConfig.SlotsPerEpoch
+
+ // Determine the validator indices of each minipool
+ err := r.createMinipoolIndexMap()
+ if err != nil {
+ return err
+ }
+
+ // Check all of the attestations for each epoch
+ r.log.Printlnf("%s Checking participation of %d minipools for epochs %d to %d", r.logPrefix, len(r.validatorIndexMap), startEpoch, endEpoch)
+ r.log.Printlnf("%s NOTE: this will take a long time, progress is reported every 100 epochs", r.logPrefix)
+
+ epochsDone := 0
+ reportStartTime := time.Now()
+ // Inclusive of endEpoch
+ for epoch := startEpoch; epoch < endEpoch+1; epoch++ {
+ if epochsDone == 100 {
+ timeTaken := time.Since(reportStartTime)
+ r.log.Printlnf("%s On Epoch %d of %d (%.2f%%)... (%s so far)", r.logPrefix, epoch, endEpoch, float64(epoch-startEpoch)/float64(endEpoch-startEpoch)*100.0, timeTaken)
+ epochsDone = 0
+ }
+
+ err := r.processEpoch(true, epoch)
+ if err != nil {
+ return err
+ }
+
+ epochsDone++
+ }
+
+ // Check the epoch after the end of the interval for any lingering attestations
+ epoch := endEpoch + 1
+ err = r.processEpoch(false, epoch)
+ if err != nil {
+ return err
+ }
+
+ r.log.Printlnf("%s Finished participation check (total time = %s)", r.logPrefix, time.Since(reportStartTime))
+ return nil
+
+}
+
+// Process an epoch, optionally getting the duties for all eligible minipools in it and checking each one's attestation performance
+func (r *treeGeneratorImpl_v11) processEpoch(duringInterval bool, epoch uint64) error {
+
+ // Get the committee info and attestation records for this epoch
+ var committeeData beacon.Committees
+ attestationsPerSlot := make([][]beacon.AttestationInfo, r.slotsPerEpoch)
+ var wg errgroup.Group
+
+ if duringInterval {
+ wg.Go(func() error {
+ var err error
+ committeeData, err = r.bc.GetCommitteesForEpoch(&epoch)
+ return err
+ })
+ }
+
+ // Guards concurrent writes to r.minipoolWithdrawals from the per-slot goroutines below
+ withdrawalsLock := &sync.Mutex{}
+ for i := uint64(0); i < r.slotsPerEpoch; i++ {
+ // Get the beacon block for this slot
+ i := i // capture the loop variable for the closure
+ slot := epoch*r.slotsPerEpoch + i
+ slotTime := r.networkState.BeaconConfig.GetSlotTime(slot)
+ wg.Go(func() error {
+ beaconBlock, found, err := r.bc.GetBeaconBlock(fmt.Sprint(slot))
+ if err != nil {
+ return err
+ }
+ if found {
+ // Safe without a lock - each goroutine writes a distinct index
+ attestationsPerSlot[i] = beaconBlock.Attestations
+ }
+
+ // If we don't need withdrawal amounts because we're using ruleset 9,
+ // return early
+ if r.rewardsFile.RulesetVersion < 10 || !duringInterval {
+ return nil
+ }
+
+ for _, withdrawal := range beaconBlock.Withdrawals {
+ // Ignore non-RP validators
+ mpi, exists := r.validatorIndexMap[withdrawal.ValidatorIndex]
+ if !exists {
+ continue
+ }
+ nnd := r.networkState.NodeDetailsByAddress[mpi.NodeAddress]
+ nmd := 
r.networkState.MinipoolDetailsByAddress[mpi.Address]
+
+ // Check that the node is opted into the SP during this slot
+ if !nnd.WasOptedInAt(slotTime) {
+ continue
+ }
+
+ // Check that the minipool's bond is eligible for bonuses at this slot
+ if eligible := nmd.IsEligibleForBonuses(slotTime); !eligible {
+ continue
+ }
+
+ // If the withdrawal is in or after the minipool's withdrawable epoch, adjust it.
+ withdrawalAmount := withdrawal.Amount
+ validatorInfo := r.networkState.MinipoolValidatorDetails[mpi.ValidatorPubkey]
+ if slot >= r.networkState.BeaconConfig.FirstSlotOfEpoch(validatorInfo.WithdrawableEpoch) {
+ // Subtract 32 ETH from the withdrawal amount
+ withdrawalAmount = big.NewInt(0).Sub(withdrawalAmount, thirtyTwoEth)
+ // max(withdrawalAmount, 0)
+ if withdrawalAmount.Sign() < 0 {
+ withdrawalAmount.SetInt64(0)
+ }
+ }
+
+ // Create the minipool's withdrawal sum big.Int if it doesn't exist
+ withdrawalsLock.Lock()
+ if r.minipoolWithdrawals[mpi.Address] == nil {
+ r.minipoolWithdrawals[mpi.Address] = big.NewInt(0)
+ }
+ // Add the withdrawal amount
+ r.minipoolWithdrawals[mpi.Address].Add(r.minipoolWithdrawals[mpi.Address], withdrawalAmount)
+ withdrawalsLock.Unlock()
+ }
+ return nil
+ })
+ }
+ err := wg.Wait()
+ // Return preallocated memory to the pool if it exists
+ if committeeData != nil {
+ defer committeeData.Release()
+ }
+ if err != nil {
+ return fmt.Errorf("error getting committee and attestation records for epoch %d: %w", epoch, err)
+ }
+
+ if duringInterval {
+ // Get all of the expected duties for the epoch
+ err = r.getDutiesForEpoch(committeeData)
+ if err != nil {
+ return fmt.Errorf("error getting duties for epoch %d: %w", epoch, err)
+ }
+ }
+
+ // Process all of the slots in the epoch
+ for i := uint64(0); i < r.slotsPerEpoch; i++ {
+ inclusionSlot := epoch*r.slotsPerEpoch + i
+ attestations := attestationsPerSlot[i]
+ if len(attestations) > 0 {
+ // Propagate errors instead of silently discarding them
+ if err := r.checkAttestations(attestations, inclusionSlot); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+
+}
+
+func 
(r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.AttestationInfo, inclusionSlot uint64) error {
+
+ // Go through the attestations for the block
+ for _, attestation := range attestations {
+ // Get the RP committees for this attestation's slot and index
+ slotInfo, exists := r.intervalDutiesInfo.Slots[attestation.SlotIndex]
+ if !exists {
+ continue
+ }
+ // Ignore attestations delayed by more than 32 slots
+ if inclusionSlot-attestation.SlotIndex > r.beaconConfig.SlotsPerEpoch {
+ continue
+ }
+
+ for _, committeeIndex := range attestation.CommitteeIndices() {
+ rpCommittee, exists := slotInfo.Committees[uint64(committeeIndex)]
+ if !exists {
+ continue
+ }
+ blockTime := r.genesisTime.Add(time.Second * time.Duration(r.networkState.BeaconConfig.SecondsPerSlot*attestation.SlotIndex))
+
+ // Check if each RP validator attested successfully
+ for position, validator := range rpCommittee.Positions {
+ if !attestation.ValidatorAttested(committeeIndex, position, slotInfo.CommitteeSizes) {
+ continue
+ }
+
+ // This was seen, so remove it from the missing attestations and add it to the completed ones
+ delete(rpCommittee.Positions, position)
+ if len(rpCommittee.Positions) == 0 {
+ delete(slotInfo.Committees, uint64(committeeIndex))
+ }
+ if len(slotInfo.Committees) == 0 {
+ delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex)
+ }
+ delete(validator.MissingAttestationSlots, attestation.SlotIndex)
+
+ // Check if this minipool was opted into the SP for this block
+ nodeDetails := r.nodeDetails[validator.NodeIndex]
+ if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) {
+ // Not opted in
+ continue
+ }
+
+ eligibleBorrowedEth := nodeDetails.EligibleBorrowedEth
+ _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake)
+
+ // Mark this duty as completed
+ validator.CompletedAttestations[attestation.SlotIndex] = true
+
+ // Get the pseudoscore 
for this attestation
+ details := r.networkState.MinipoolDetailsByAddress[validator.Address]
+ bond, fee := details.GetMinipoolBondAndNodeFee(blockTime)
+
+ if r.rewardsFile.RulesetVersion >= 10 {
+ fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth)
+ }
+
+ minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee
+ minipoolScore.Mul(minipoolScore, bond) // Multiply by bond
+ minipoolScore.Div(minipoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total validator
+ minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee)
+
+ // Add it to the minipool's score and the total score
+ validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore)
+ r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore)
+ r.successfulAttestations++
+ }
+ }
+ }
+
+ return nil
+
+}
+
+// Maps out the attestation duties for the given epoch
+func (r *treeGeneratorImpl_v11) getDutiesForEpoch(committees beacon.Committees) error {
+
+ // Crawl the committees
+ for idx := 0; idx < committees.Count(); idx++ {
+ slotIndex := committees.Slot(idx)
+ if slotIndex < r.rewardsFile.ConsensusStartBlock || slotIndex > r.rewardsFile.ConsensusEndBlock {
+ // Ignore slots that are out of bounds
+ continue
+ }
+ blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*slotIndex))
+ committeeIndex := committees.Index(idx)
+
+ // Add the committee size to the list, for calculating offset in post-electra aggregation_bits
+ slotInfo, exists := r.intervalDutiesInfo.Slots[slotIndex]
+ if !exists {
+ slotInfo = &SlotInfo{
+ Index: slotIndex,
+ Committees: map[uint64]*CommitteeInfo{},
+ CommitteeSizes: map[uint64]int{},
+ }
+ r.intervalDutiesInfo.Slots[slotIndex] = slotInfo
+ }
+ slotInfo.CommitteeSizes[committeeIndex] = committees.ValidatorCount(idx)
+
+ // Check if there are any RP validators in this committee
+ rpValidators := map[int]*MinipoolInfo{}
+ for position, validator := range 
committees.Validators(idx) {
+ minipoolInfo, exists := r.validatorIndexMap[validator]
+ if !exists {
+ // This isn't an RP validator, so ignore it
+ continue
+ }
+
+ // Check if this minipool was opted into the SP for this block
+ nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.NodeAddress]
+ isOptedIn := nodeDetails.SmoothingPoolRegistrationState
+ spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0)
+ if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it
+ (!isOptedIn && spRegistrationTime.Sub(blockTime) < 0) { // If this block occurred after the node opted out, ignore it
+ continue
+ }
+
+ // Check if this minipool was in the `staking` state during this time
+ mpd := r.networkState.MinipoolDetailsByAddress[minipoolInfo.Address]
+ statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0)
+ if mpd.Status != rptypes.Staking || blockTime.Sub(statusChangeTime) < 0 {
+ continue
+ }
+
+ // This was a legal RP validator opted into the SP during this slot so add it
+ // (checkAttestations deletes the slot again once the attestation is seen)
+ rpValidators[position] = minipoolInfo
+ minipoolInfo.MissingAttestationSlots[slotIndex] = true
+ }
+
+ // If there are some RP validators, add this committee to the map
+ if len(rpValidators) > 0 {
+ slotInfo.Committees[committeeIndex] = &CommitteeInfo{
+ Index: committeeIndex,
+ Positions: rpValidators,
+ }
+ }
+ }
+
+ return nil
+
+}
+
+// Maps all minipools to their validator indices and creates a map of indices to minipool info
+func (r *treeGeneratorImpl_v11) createMinipoolIndexMap() error {
+
+ // Get the status for all uncached minipool validators and add them to the cache
+ r.validatorIndexMap = map[string]*MinipoolInfo{}
+ for _, details := range r.nodeDetails {
+ if details.IsEligible {
+ for _, minipoolInfo := range details.Minipools {
+ status, exists := r.networkState.MinipoolValidatorDetails[minipoolInfo.ValidatorPubkey]
+ if !exists {
+ // Remove minipools that don't have 
indices yet since they're not actually viable
+ //r.log.Printlnf("NOTE: minipool %s (pubkey %s) didn't exist at this slot; removing it", minipoolInfo.Address.Hex(), minipoolInfo.ValidatorPubkey.Hex())
+ minipoolInfo.WasActive = false
+ } else {
+ switch status.Status {
+ case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued:
+ // Remove minipools that don't have indices yet since they're not actually viable
+ //r.log.Printlnf("NOTE: minipool %s (index %s, pubkey %s) was in state %s; removing it", minipoolInfo.Address.Hex(), status.Index, minipoolInfo.ValidatorPubkey.Hex(), string(status.Status))
+ minipoolInfo.WasActive = false
+ default:
+ // Get the validator index
+ minipoolInfo.ValidatorIndex = status.Index
+ r.validatorIndexMap[minipoolInfo.ValidatorIndex] = minipoolInfo
+
+ // Get the validator's activation start and end slots
+ // (endSlot wraps if ExitEpoch is FarEpoch, but it is only read below when ExitEpoch != FarEpoch)
+ startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch
+ endSlot := status.ExitEpoch * r.beaconConfig.SlotsPerEpoch
+
+ // Verify this minipool has already started
+ if status.ActivationEpoch == FarEpoch {
+ //r.log.Printlnf("NOTE: minipool %s hasn't been scheduled for activation yet; removing it", minipoolInfo.Address.Hex())
+ minipoolInfo.WasActive = false
+ continue
+ } else if startSlot > r.rewardsFile.ConsensusEndBlock {
+ //r.log.Printlnf("NOTE: minipool %s activates on slot %d which is after interval end %d; removing it", minipoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock)
+ minipoolInfo.WasActive = false
+ // NOTE(review): unlike the sibling branches there is no `continue` here, so
+ // control falls through to the exit check below - confirm this is intentional
+ }
+
+ // Check if the minipool exited before this interval
+ if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock {
+ //r.log.Printlnf("NOTE: minipool %s exited on slot %d which was before interval start %d; removing it", minipoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock)
+ minipoolInfo.WasActive = false
+ continue
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return nil
+
+}
+
+// Get the details for every node that was opted into the 
Smoothing Pool for at least some portion of this interval
+func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error {
+
+ // For each NO, get their opt-in status and time of last change in batches
+ r.log.Printlnf("%s Getting details of nodes for Smoothing Pool calculation...", r.logPrefix)
+ nodeCount := uint64(len(r.networkState.NodeDetails))
+ r.nodeDetails = make([]*NodeSmoothingDetails, nodeCount)
+ for batchStartIndex := uint64(0); batchStartIndex < nodeCount; batchStartIndex += SmoothingPoolDetailsBatchSize {
+
+ // Get batch start & end index
+ iterationStartIndex := batchStartIndex
+ iterationEndIndex := min(batchStartIndex+SmoothingPoolDetailsBatchSize, nodeCount)
+
+ // Load details
+ var wg errgroup.Group
+ for iterationIndex := iterationStartIndex; iterationIndex < iterationEndIndex; iterationIndex++ {
+ iterationIndex := iterationIndex // capture the loop variable for the closure
+ wg.Go(func() error {
+ nativeNodeDetails := r.networkState.NodeDetails[iterationIndex]
+ nodeDetails := &NodeSmoothingDetails{
+ Address: nativeNodeDetails.NodeAddress,
+ Minipools: []*MinipoolInfo{},
+ SmoothingPoolEth: big.NewInt(0),
+ BonusEth: big.NewInt(0),
+ RewardsNetwork: nativeNodeDetails.RewardNetwork.Uint64(),
+ RplStake: nativeNodeDetails.RplStake,
+ }
+
+ nodeDetails.IsOptedIn = nativeNodeDetails.SmoothingPoolRegistrationState
+ statusChangeTimeBig := nativeNodeDetails.SmoothingPoolRegistrationChanged
+ statusChangeTime := time.Unix(statusChangeTimeBig.Int64(), 0)
+
+ // An opted-in node has a real opt-in time and an unbounded opt-out time;
+ // an opted-out node has the reverse
+ if nodeDetails.IsOptedIn {
+ nodeDetails.OptInTime = statusChangeTime
+ nodeDetails.OptOutTime = time.Unix(farFutureTimestamp, 0)
+ } else {
+ nodeDetails.OptOutTime = statusChangeTime
+ nodeDetails.OptInTime = time.Unix(farPastTimestamp, 0)
+ }
+
+ // Get the details for each minipool in the node
+ for _, mpd := range r.networkState.MinipoolDetailsByNode[nodeDetails.Address] {
+ if mpd.Exists && mpd.Status == rptypes.Staking {
+ nativeMinipoolDetails := r.networkState.MinipoolDetailsByAddress[mpd.MinipoolAddress]
+ penaltyCount 
:= nativeMinipoolDetails.PenaltyCount.Uint64() + if penaltyCount >= 3 { + // This node is a cheater + nodeDetails.IsEligible = false + nodeDetails.Minipools = []*MinipoolInfo{} + r.nodeDetails[iterationIndex] = nodeDetails + return nil + } + + // This minipool is below the penalty count, so include it + nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ + Address: mpd.MinipoolAddress, + ValidatorPubkey: mpd.Pubkey, + NodeAddress: nodeDetails.Address, + NodeIndex: iterationIndex, + Fee: nativeMinipoolDetails.NodeFee, + //MissedAttestations: 0, + //GoodAttestations: 0, + MissingAttestationSlots: map[uint64]bool{}, + CompletedAttestations: map[uint64]bool{}, + WasActive: true, + AttestationScore: NewQuotedBigInt(0), + NodeOperatorBond: nativeMinipoolDetails.NodeDepositBalance, + }) + } + } + + nodeDetails.IsEligible = len(nodeDetails.Minipools) > 0 + r.nodeDetails[iterationIndex] = nodeDetails + return nil + }) + } + if err := wg.Wait(); err != nil { + return err + } + } + + // Populate the eligible borrowed ETH field for all nodes + for _, nodeDetails := range r.nodeDetails { + nnd := r.networkState.NodeDetailsByAddress[nodeDetails.Address] + nodeDetails.EligibleBorrowedEth = r.networkState.GetEligibleBorrowedEth(nnd) + } + + return nil + +} + +// Validates that the provided network is legal +func (r *treeGeneratorImpl_v11) validateNetwork(network uint64) (bool, error) { + valid, exists := r.validNetworkCache[network] + if !exists { + var err error + valid, err = r.rp.GetNetworkEnabled(big.NewInt(int64(network)), r.opts) + if err != nil { + return false, err + } + r.validNetworkCache[network] = valid + } + + return valid, nil +} + +// Gets the start blocks for the given interval +func (r *treeGeneratorImpl_v11) getBlocksAndTimesForInterval(previousIntervalEvent rewards.RewardsEvent) (*types.Header, error) { + // Sanity check to confirm the BN can access the block from the previous interval + _, exists, err := 
r.bc.GetBeaconBlock(previousIntervalEvent.ConsensusBlock.String()) + if err != nil { + return nil, fmt.Errorf("error verifying block from previous interval: %w", err) + } + if !exists { + return nil, fmt.Errorf("couldn't retrieve CL block from previous interval (slot %d); this likely means you checkpoint sync'd your Beacon Node and it has not backfilled to the previous interval yet so it cannot be used for tree generation", previousIntervalEvent.ConsensusBlock.Uint64()) + } + + previousEpoch := previousIntervalEvent.ConsensusBlock.Uint64() / r.beaconConfig.SlotsPerEpoch + nextEpoch := previousEpoch + 1 + + consensusStartSlot := nextEpoch * r.beaconConfig.SlotsPerEpoch + startTime := r.beaconConfig.GetSlotTime(consensusStartSlot) + endTime := r.beaconConfig.GetSlotTime(r.snapshotEnd.Slot) + + r.rewardsFile.StartTime = startTime + r.minipoolPerformanceFile.StartTime = startTime + + r.rewardsFile.EndTime = endTime + r.minipoolPerformanceFile.EndTime = endTime + + r.rewardsFile.ConsensusStartBlock = nextEpoch * r.beaconConfig.SlotsPerEpoch + r.minipoolPerformanceFile.ConsensusStartBlock = r.rewardsFile.ConsensusStartBlock + + r.rewardsFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + r.minipoolPerformanceFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + + r.rewardsFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + r.minipoolPerformanceFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + + // Get the first block that isn't missing + var elBlockNumber uint64 + for { + beaconBlock, exists, err := r.bc.GetBeaconBlock(fmt.Sprint(r.rewardsFile.ConsensusStartBlock)) + if err != nil { + return nil, fmt.Errorf("error getting EL data for BC slot %d: %w", r.rewardsFile.ConsensusStartBlock, err) + } + if !exists { + r.rewardsFile.ConsensusStartBlock++ + r.minipoolPerformanceFile.ConsensusStartBlock++ + } else { + elBlockNumber = beaconBlock.ExecutionBlockNumber + break + } + } + + var startElHeader *types.Header + if elBlockNumber == 0 { + // We are pre-merge, 
so get the first block after the one from the previous interval + r.rewardsFile.ExecutionStartBlock = previousIntervalEvent.ExecutionBlock.Uint64() + 1 + r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock))) + if err != nil { + return nil, fmt.Errorf("error getting EL start block %d: %w", r.rewardsFile.ExecutionStartBlock, err) + } + } else { + // We are post-merge, so get the EL block corresponding to the BC block + r.rewardsFile.ExecutionStartBlock = elBlockNumber + r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) + if err != nil { + return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) + } + } + + return startElHeader, nil +} + +func (r *treeGeneratorImpl_v11) saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return saveRewardsArtifacts(smartnode, treeResult, nodeTrusted) +} diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go new file mode 100644 index 000000000..f62724eed --- /dev/null +++ b/shared/services/rewards/mock_v11_test.go @@ -0,0 +1,859 @@ +package rewards + +// This file contains treegen tests which use mock history. +// These mocks are faster to process than real history, and are useful for +// testing new features and refactoring. 
+ +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/smartnode/bindings/utils/eth" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { + + history := test.NewDefaultMockHistory() + // Add a node which is earning some bonus commission + node := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 5, + }) + node.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, node) + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.MinipoolValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + // Set some custom balances for the validators that opt in and out of smoothing pool + nodeSummary := 
history.GetNodeSummary() + customBalanceNodes := nodeSummary["single_eight_eth_opted_in_quarter"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) + } + customBalanceNodes = nodeSummary["single_eight_eth_opted_out_three_quarters"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) + } + customBalanceNodes = nodeSummary["single_bond_reduction"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.5) + } + + history.SetWithdrawals(t.bc) + + generatorv11 := newTreeGeneratorImpl_v11( + &logger, + t.Name()+"-stateless", + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v11Artifacts, err := generatorv11.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v11", v11Artifacts) + } + + // Validate individual node details in the rewards file + rewardsFile := v11Artifacts.RewardsFile + minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile + + singleEightEthNodes := nodeSummary["single_eight_eth"] + singleSixteenEthNodes := nodeSummary["single_sixteen_eth"] + for _, node := range append(singleEightEthNodes, singleSixteenEthNodes...) 
{ + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got 0 ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Logf("Node %+v", node) + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + singleEightEthNodesSP := nodeSummary["single_eight_eth_sp"] + singleSixteenEthNodesSP := nodeSummary["single_sixteen_eth_sp"] + for _, node := range append(singleEightEthNodesSP, singleSixteenEthNodesSP...) 
{ + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.SmoothingPoolRegistrationState { + if node.Class == "single_eight_eth_sp" { + expectedEthAmount.SetString("1450562599049128367", 10) + // There should be a bonus for these nodes' minipools + if len(node.Minipools) != 1 { + t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) + } + minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + // 8 eth minipools with 10% collateral earn 14% commission overall. + // They earned 10% on 24/32 of the 1 eth of consensus rewards already, which is 0.075 eth. + // Their bonus is therefore 4/10 of 0.075 eth, which is 0.03 eth. 
+ expectedBonusEthEarned, _ := big.NewInt(0).SetString("30000000000000000", 10) + if minipoolPerf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(14e16)) + expectedAttestationScore.Mul(expectedAttestationScore, eightEth) + expectedAttestationScore.Div(expectedAttestationScore, thirtyTwoEth) + expectedAttestationScore.Add(expectedAttestationScore, big.NewInt(14e16)) + expectedAttestationScore.Mul(expectedAttestationScore, big.NewInt(101)) // there are 101 epochs in the interval + if minipoolPerf.GetAttestationScore().Cmp(expectedAttestationScore) != 0 { + t.Fatalf("Minipool %s attestation score does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetAttestationScore().String(), expectedAttestationScore.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("2200871632329635499", 10) + if len(node.Minipools) != 1 { + t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) + } + minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + // The 16 eth minipools earn 10% on 24/32. 
+ expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(1e17)) + expectedAttestationScore.Mul(expectedAttestationScore, sixteenEth) + expectedAttestationScore.Div(expectedAttestationScore, thirtyTwoEth) + expectedAttestationScore.Add(expectedAttestationScore, big.NewInt(1e17)) + expectedAttestationScore.Mul(expectedAttestationScore, big.NewInt(101)) // there are 101 epochs in the interval + if minipoolPerf.GetAttestationScore().Cmp(expectedAttestationScore) != 0 { + t.Fatalf("Minipool %s attestation score does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetAttestationScore().String(), expectedAttestationScore.String()) + } + // 16 eth minipools earn no bonus. + if minipoolPerf.GetBonusEthEarned().Sign() != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != 0", node.Minipools[0].Address.Hex(), minipoolPerf.GetBonusEthEarned().String()) + } + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingInNodesSP := append( + nodeSummary["single_eight_eth_opted_in_quarter"], + nodeSummary["single_sixteen_eth_opted_in_quarter"]..., + ) + for _, node := range optingInNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + 
expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + if perf.GetBonusEthEarned().Sign() != 0 { + // 16 eth minipools should not get bonus commission + t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) + } + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1091438193343898573", 10) + // Earns 3/4 the bonus of a node that was in for the whole interval + expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. 
+ expectedEthAmount.SetString("1656101426307448494", 10) + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingOutNodesSP := append( + nodeSummary["single_eight_eth_opted_out_three_quarters"], + nodeSummary["single_sixteen_eth_opted_out_three_quarters"]..., + ) + for _, node := range optingOutNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1077373217115689381", 10) + // Earns 3/4 the bonus of a node that was in for the whole interval + expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) + if 
perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("1634310618066561014", 10) + if perf.GetBonusEthEarned().Sign() != 0 { + // 16 eth minipools should not get bonus commission + t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + bondReductionNode := nodeSummary["single_bond_reduction"] + for _, node := range bondReductionNode { + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Nodes that bond reduce are treated as having their new bond for the full interval, + // when it comes to RPL rewards. 
+ expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got reduced ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount, _ := big.NewInt(0).SetString("1920903328050713153", 10) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // And a reduced bonus + expectedBonusEthEarned, _ := big.NewInt(0).SetString("15000000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + noMinipoolsNodes := nodeSummary["no_minipools"] + for _, node := range noMinipoolsNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + 
t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + // Validate merkle root + v11MerkleRoot := v11Artifacts.RewardsFile.GetMerkleRoot() + + // Expected merkle root: + // 0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b + // + // If this does not match, it implies either you updated the set of default mock nodes, + // or you introduced a regression in treegen. + // DO NOT update this value unless you know what you are doing. + expectedMerkleRoot := "0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b" + if !strings.EqualFold(v11MerkleRoot, expectedMerkleRoot) { + t.Fatalf("Merkle root does not match expected value %s != %s", v11MerkleRoot, expectedMerkleRoot) + } else { + t.Logf("Merkle root matches expected value %s", expectedMerkleRoot) + } +} + +func TestInsufficientEthForBonusesesV11(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 5, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 20, + }) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) 
+ + // Overwrite the SP balance to a value under the bonus commission + history.NetworkDetails.SmoothingPoolBalance = big.NewInt(1000) + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + history.SetWithdrawals(t.bc) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.MinipoolValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv11 := newTreeGeneratorImpl_v11( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v11Artifacts, err := generatorv11.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v11", v11Artifacts) + } + + // Check the rewards file + rewardsFile := v11Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + if ethOne.Uint64() != 169+416 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 169+416) + } + 
ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + if ethTwo.Uint64() != 177+237 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %d", ethTwo.String(), 177+237) + } + + // Check the minipool performance file + minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance not found") + } + if perfOne.GetBonusEthEarned().Uint64() != 416 { + t.Fatalf("Node one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 416) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 237 { + t.Fatalf("Node two bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 237) + } +} + +func TestMockNoRPLRewardsV11(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: false, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 2, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + nodeTwo.Minipools[1].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) 
+ + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Give all three minipools 1 ETH of consensus income + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[1].ValidatorIndex, big.NewInt(1e18)) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.MinipoolValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv11 := newTreeGeneratorImpl_v11( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v11Artifacts, err := generatorv11.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + 
t.saveArtifacts("v11", v11Artifacts) + } + + // Check the rewards file + rewardsFile := v11Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + // Node one is not a SP, so it should have 0 ETH + if ethOne.Uint64() != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 0) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("32575000000000000000", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile + _, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if ok { + t.Fatalf("Node one minipool performance should not be found") + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. 
+ if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } + perfThree, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[1].Address) + if !ok { + t.Fatalf("Node two minipool two performance not found") + } + if perfThree.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool two bonus does not match expected value: %s != %d", perfThree.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. + if perfThree.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool two effective commission does not match expected value: %s != %d", perfThree.GetEffectiveCommission().String(), 100000000000000000) + } +} + +func TestMockOptedOutAndThenBondReducedV11(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: false, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + // Opted out 1/4 of the way through the interval + nodeOne.SmoothingPoolRegistrationChanged = history.BeaconConfig.GetSlotTime(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch + (history.EndEpoch-history.StartEpoch)/4)) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + // Bond reduced 1/2 of the way through the interval + nodeOne.Minipools[0].LastBondReductionTime = history.BeaconConfig.GetSlotTime(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch + (history.EndEpoch-history.StartEpoch)/2)) + nodeOne.Minipools[0].LastBondReductionPrevValue = big.NewInt(0).Set(sixteenEth) + nodeOne.Minipools[0].LastBondReductionPrevNodeFee, _ = 
big.NewInt(0).SetString("140000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Add withdrawals to both minipools + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.MinipoolValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv11 := newTreeGeneratorImpl_v11( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: 
executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v11Artifacts, err := generatorv11.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v11", v11Artifacts) + } + + // Check the rewards file + rewardsFile := v11Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + // Node one was in the SP so it should have some ETH, but no bonuses + expectedEthOne, _ := big.NewInt(0).SetString("11309523809523809523", 10) + if ethOne.Cmp(expectedEthOne) != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("26089087301587301587", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance should be found") + } + if perfOne.GetBonusEthEarned().Uint64() != 0 { + t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 0) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and 
starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } +} + +func TestMockWithdrawableEpochV11(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + // Withdrawable epoch half way through the interval + nodeTwo.Minipools[0].WithdrawableEpoch = history.StartEpoch + (history.EndEpoch-history.StartEpoch)/2 + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) 
+ + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Add withdrawals to both minipools + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + // Add a withdrawal in the epoch after the interval ends + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.EndEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + // Withdraw 0.5 eth at the start of the interval + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(5e17)) + // Withdraw 32.5 eth at the end of the interval + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.EndEpoch-1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(0).Mul(big.NewInt(325), big.NewInt(1e17))) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.MinipoolValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv11 := newTreeGeneratorImpl_v11( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + 
Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v11Artifacts, err := generatorv11.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v11", v11Artifacts) + } + + // Check the rewards file + rewardsFile := v11Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + expectedEthOne, _ := big.NewInt(0).SetString("21920833333333333333", 10) + if ethOne.Cmp(expectedEthOne) != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("10654166666666666666", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance should be found") + } + if perfOne.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 37500000000000000) + } + if perfOne.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node one minipool one effective commission does not match expected value: %s != %d", perfOne.GetEffectiveCommission().String(), 1000000000000000000) + } + if perfOne.GetConsensusIncome().Uint64() != 1000000000000000000 { + t.Fatalf("Node one minipool one consensus income does not match expected value: %s != %d", perfOne.GetConsensusIncome().String(), 1000000000000000000) + } + perfTwo, ok := 
minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } + if perfTwo.GetConsensusIncome().Uint64() != 1000000000000000000 { + t.Fatalf("Node two minipool one consensus income does not match expected value: %s != %d", perfTwo.GetConsensusIncome().String(), 1000000000000000000) + } +} From 65c5beea3439049dcf64d3f4a2cd53c2f37a302c Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 22 Jul 2025 14:58:13 -0400 Subject: [PATCH 02/33] Remove 32 slot limit for attestation inclusion --- shared/services/rewards/generator-impl-v11.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 708858825..c938e2a3f 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -939,10 +939,6 @@ func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.Attestat if !exists { continue } - // Ignore attestations delayed by more than 32 slots - if inclusionSlot-attestation.SlotIndex > r.beaconConfig.SlotsPerEpoch { - continue - } for _, committeeIndex := range attestation.CommitteeIndices() { rpCommittee, exists := slotInfo.Committees[uint64(committeeIndex)] From a64b7b2f5fd0a3243e24f1ed6549fdab09fca27e Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 22 Jul 2025 15:42:09 -0400 Subject: [PATCH 03/33] Convert node data in minipool struct to backref, add megapool info --- 
shared/services/rewards/generator-impl-v11.go | 29 +++++++++++++++---- shared/services/rewards/generator-impl-v8.go | 8 ++--- .../services/rewards/generator-impl-v9-v10.go | 10 +++---- shared/services/rewards/types.go | 20 +++++++++++-- 4 files changed, 51 insertions(+), 16 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index c938e2a3f..5796c5837 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -863,7 +863,7 @@ func (r *treeGeneratorImpl_v11) processEpoch(duringInterval bool, epoch uint64) if !exists { continue } - nnd := r.networkState.NodeDetailsByAddress[mpi.NodeAddress] + nnd := r.networkState.NodeDetailsByAddress[mpi.Node.Address] nmd := r.networkState.MinipoolDetailsByAddress[mpi.Address] // Check that the node is opted into the SP during this slot @@ -964,7 +964,7 @@ func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.Attestat delete(validator.MissingAttestationSlots, attestation.SlotIndex) // Check if this minipool was opted into the SP for this block - nodeDetails := r.nodeDetails[validator.NodeIndex] + nodeDetails := r.nodeDetails[validator.Node.Index] if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) { // Not opted in continue @@ -1036,7 +1036,7 @@ func (r *treeGeneratorImpl_v11) getDutiesForEpoch(committees beacon.Committees) } // Check if this minipool was opted into the SP for this block - nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.NodeAddress] + nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.Node.Address] isOptedIn := nodeDetails.SmoothingPoolRegistrationState spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0) if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it @@ -1143,6 +1143,7 @@ func (r *treeGeneratorImpl_v11) 
getSmoothingPoolNodeDetails() error { wg.Go(func() error { nativeNodeDetails := r.networkState.NodeDetails[iterationIndex] nodeDetails := &NodeSmoothingDetails{ + Index: iterationIndex, Address: nativeNodeDetails.NodeAddress, Minipools: []*MinipoolInfo{}, SmoothingPoolEth: big.NewInt(0), @@ -1180,8 +1181,7 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ Address: mpd.MinipoolAddress, ValidatorPubkey: mpd.Pubkey, - NodeAddress: nodeDetails.Address, - NodeIndex: iterationIndex, + Node: nodeDetails, Fee: nativeMinipoolDetails.NodeFee, //MissedAttestations: 0, //GoodAttestations: 0, @@ -1194,6 +1194,25 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { } } + if nativeNodeDetails.MegapoolDeployed { + // Get the megapool details + megapoolAddress := nativeNodeDetails.MegapoolAddress + validators := r.networkState.MegapoolToPubkeysMap[megapoolAddress] + + mpInfo := &MegapoolInfo{ + Address: megapoolAddress, + } + + for _, validator := range validators { + mpInfo.Validators = append(mpInfo.Validators, &MegapoolValidatorInfo{ + Pubkey: validator, + MissingAttestationSlots: map[uint64]bool{}, + AttestationScore: NewQuotedBigInt(0), + }) + } + nodeDetails.Megapools = append(nodeDetails.Megapools, mpInfo) + } + nodeDetails.IsEligible = len(nodeDetails.Minipools) > 0 r.nodeDetails[iterationIndex] = nodeDetails return nil diff --git a/shared/services/rewards/generator-impl-v8.go b/shared/services/rewards/generator-impl-v8.go index ed03cc952..5578cb32a 100644 --- a/shared/services/rewards/generator-impl-v8.go +++ b/shared/services/rewards/generator-impl-v8.go @@ -876,7 +876,7 @@ func (r *treeGeneratorImpl_v8) checkDutiesForSlot(attestations []beacon.Attestat delete(validator.MissingAttestationSlots, attestation.SlotIndex) // Check if this minipool was opted into the SP for this block - nodeDetails := r.nodeDetails[validator.NodeIndex] + nodeDetails := 
r.nodeDetails[validator.Node.Index] if blockTime.Sub(nodeDetails.OptInTime) < 0 || nodeDetails.OptOutTime.Sub(blockTime) < 0 { // Not opted in continue @@ -940,7 +940,7 @@ func (r *treeGeneratorImpl_v8) getDutiesForEpoch(committees beacon.Committees) e } // Check if this minipool was opted into the SP for this block - nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.NodeAddress] + nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.Node.Address] isOptedIn := nodeDetails.SmoothingPoolRegistrationState spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0) if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it @@ -1050,6 +1050,7 @@ func (r *treeGeneratorImpl_v8) getSmoothingPoolNodeDetails() error { wg.Go(func() error { nativeNodeDetails := r.networkState.NodeDetails[iterationIndex] nodeDetails := &NodeSmoothingDetails{ + Index: iterationIndex, Address: nativeNodeDetails.NodeAddress, Minipools: []*MinipoolInfo{}, SmoothingPoolEth: big.NewInt(0), @@ -1086,8 +1087,7 @@ func (r *treeGeneratorImpl_v8) getSmoothingPoolNodeDetails() error { nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ Address: mpd.MinipoolAddress, ValidatorPubkey: mpd.Pubkey, - NodeAddress: nodeDetails.Address, - NodeIndex: iterationIndex, + Node: nodeDetails, Fee: nativeMinipoolDetails.NodeFee, //MissedAttestations: 0, //GoodAttestations: 0, diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go index a7e9e0b1d..ea59398b4 100644 --- a/shared/services/rewards/generator-impl-v9-v10.go +++ b/shared/services/rewards/generator-impl-v9-v10.go @@ -868,7 +868,7 @@ func (r *treeGeneratorImpl_v9_v10) processEpoch(duringInterval bool, epoch uint6 if !exists { continue } - nnd := r.networkState.NodeDetailsByAddress[mpi.NodeAddress] + nnd := r.networkState.NodeDetailsByAddress[mpi.Node.Address] nmd := 
r.networkState.MinipoolDetailsByAddress[mpi.Address] // Check that the node is opted into the SP during this slot @@ -973,7 +973,7 @@ func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.Attes delete(validator.MissingAttestationSlots, attestation.SlotIndex) // Check if this minipool was opted into the SP for this block - nodeDetails := r.nodeDetails[validator.NodeIndex] + nodeDetails := r.nodeDetails[validator.Node.Index] if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) { // Not opted in continue @@ -1045,7 +1045,7 @@ func (r *treeGeneratorImpl_v9_v10) getDutiesForEpoch(committees beacon.Committee } // Check if this minipool was opted into the SP for this block - nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.NodeAddress] + nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.Node.Address] isOptedIn := nodeDetails.SmoothingPoolRegistrationState spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0) if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it @@ -1155,6 +1155,7 @@ func (r *treeGeneratorImpl_v9_v10) getSmoothingPoolNodeDetails() error { wg.Go(func() error { nativeNodeDetails := r.networkState.NodeDetails[iterationIndex] nodeDetails := &NodeSmoothingDetails{ + Index: iterationIndex, Address: nativeNodeDetails.NodeAddress, Minipools: []*MinipoolInfo{}, SmoothingPoolEth: big.NewInt(0), @@ -1192,8 +1193,7 @@ func (r *treeGeneratorImpl_v9_v10) getSmoothingPoolNodeDetails() error { nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ Address: mpd.MinipoolAddress, ValidatorPubkey: mpd.Pubkey, - NodeAddress: nodeDetails.Address, - NodeIndex: iterationIndex, + Node: nodeDetails, Fee: nativeMinipoolDetails.NodeFee, //MissedAttestations: 0, //GoodAttestations: 0, diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 
06791d0f9..33a21f313 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -203,12 +203,26 @@ type IntervalInfo struct { TotalNodeWeight *big.Int `json:"-"` } +type MegapoolValidatorInfo struct { + Pubkey types.ValidatorPubkey `json:"pubkey"` + Index string `json:"index"` + MissedAttestations uint64 `json:"-"` + GoodAttestations uint64 `json:"-"` + MissingAttestationSlots map[uint64]bool `json:"missingAttestationSlots"` + AttestationScore *QuotedBigInt `json:"attestationScore"` + AttestationCount int `json:"attestationCount"` +} + +type MegapoolInfo struct { + Address common.Address `json:"address"` + Validators []*MegapoolValidatorInfo +} + type MinipoolInfo struct { Address common.Address `json:"address"` ValidatorPubkey types.ValidatorPubkey `json:"pubkey"` ValidatorIndex string `json:"index"` - NodeAddress common.Address `json:"nodeAddress"` - NodeIndex uint64 `json:"-"` + Node *NodeSmoothingDetails `json:"node"` Fee *big.Int `json:"-"` MissedAttestations uint64 `json:"-"` GoodAttestations uint64 `json:"-"` @@ -246,11 +260,13 @@ type CommitteeInfo struct { // Details about a node for the Smoothing Pool type NodeSmoothingDetails struct { + Index uint64 Address common.Address IsEligible bool IsOptedIn bool StatusChangeTime time.Time Minipools []*MinipoolInfo + Megapools []*MegapoolInfo EligibleSeconds *big.Int StartSlot uint64 EndSlot uint64 From 37379bb1b1fde8f96003240811b57a2bd63a527a Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 12:47:50 -0400 Subject: [PATCH 04/33] calculate megapool scores --- bindings/utils/state/contracts.go | 4 + bindings/utils/state/megapool.go | 27 +- bindings/utils/state/network.go | 24 ++ shared/services/rewards/generator-impl-v11.go | 386 +++++++++++++----- shared/services/rewards/generator-impl-v8.go | 18 +- .../services/rewards/generator-impl-v9-v10.go | 18 +- shared/services/rewards/types.go | 59 ++- shared/services/state/network-state.go | 34 +- 8 files changed, 440 
insertions(+), 130 deletions(-) diff --git a/bindings/utils/state/contracts.go b/bindings/utils/state/contracts.go index 18bb708e1..70b88ce77 100644 --- a/bindings/utils/state/contracts.go +++ b/bindings/utils/state/contracts.go @@ -57,6 +57,7 @@ type NetworkContracts struct { // Saturn RocketMegapoolFactory *rocketpool.Contract RocketMegapoolManager *rocketpool.Contract + RocketNetworkRevenues *rocketpool.Contract } type contractArtifacts struct { @@ -186,6 +187,9 @@ func NewNetworkContracts(rp *rocketpool.RocketPool, isSaturnDeployed bool, multi }, contractArtifacts{ name: "rocketMegapoolManager", contract: &contracts.RocketMegapoolManager, + }, contractArtifacts{ + name: "rocketNetworkRevenues", + contract: &contracts.RocketNetworkRevenues, }) } diff --git a/bindings/utils/state/megapool.go b/bindings/utils/state/megapool.go index 7661933cd..7e7395471 100644 --- a/bindings/utils/state/megapool.go +++ b/bindings/utils/state/megapool.go @@ -2,12 +2,12 @@ package state import ( "context" + "fmt" "math/big" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/rocket-pool/smartnode/bindings/megapool" - "github.com/rocket-pool/smartnode/bindings/network" "github.com/rocket-pool/smartnode/bindings/node" "github.com/rocket-pool/smartnode/bindings/rocketpool" "golang.org/x/sync/errgroup" @@ -34,12 +34,17 @@ type NativeMegapoolDetails struct { AssignedValue *big.Int `json:"assignedValue"` NodeBond *big.Int `json:"nodeBond"` UserCapital *big.Int `json:"userCapital"` - NodeShare *big.Int `json:"nodeShare"` BondRequirement *big.Int `json:"bondRequirement"` EthBalance *big.Int `json:"ethBalance"` LastDistributionBlock uint64 `json:"lastDistributionBlock"` } +// Get the normalized bond per 32 eth validator +// This is used in treegen to calculate attestation scores +func (m *NativeMegapoolDetails) GetMegapoolBondNormalized() *big.Int { + return big.NewInt(0).Div(m.NodeBond, big.NewInt(int64(m.ActiveValidatorCount))) +} + // 
Get all megapool validators using the multicaller func GetAllMegapoolValidators(rp *rocketpool.RocketPool, contracts *NetworkContracts) ([]megapool.ValidatorInfoFromGlobalIndex, error) { opts := &bind.CallOpts{ @@ -68,20 +73,13 @@ func GetAllMegapoolValidators(rp *rocketpool.RocketPool, contracts *NetworkContr for j := i; j < max; j++ { j := j // Create a new variable `j` scoped to the loop iteration - //wg.Go(func() error { validators[j], err = megapool.GetValidatorInfo(rp, uint32(j), opts) - // if err != nil { - // return fmt.Errorf("error executing GetValidatorInfo with global index %d", j) - // } - // return nil - //}) + if err != nil { + return nil, fmt.Errorf("error executing GetValidatorInfo with global index %d", j) + } } } - // if err := wg.Wait(); err != nil { - // return nil, fmt.Errorf("error getting all megapool validators: %w", err) - // } - return validators, nil } @@ -133,11 +131,6 @@ func GetNodeMegapoolDetails(rp *rocketpool.RocketPool, nodeAccount common.Addres if err != nil { return NativeMegapoolDetails{}, err } - wg.Go(func() error { - var err error - details.NodeShare, err = network.GetCurrentNodeShare(rp, nil) - return err - }) wg.Go(func() error { var err error details.NodeDebt, err = mega.GetDebt(nil) diff --git a/bindings/utils/state/network.go b/bindings/utils/state/network.go index ec9a35f9b..2b543c509 100644 --- a/bindings/utils/state/network.go +++ b/bindings/utils/state/network.go @@ -64,6 +64,20 @@ type NetworkDetails struct { // Houston PricesSubmissionFrequency uint64 `json:"prices_submission_frequency"` BalancesSubmissionFrequency uint64 `json:"balances_submission_frequency"` + + // Saturn + MegapoolRevenueSplitSettings struct { + NodeOperatorCommissionShare *big.Int `json:"node_operator_commission_share"` + NodeOperatorCommissionAdder *big.Int `json:"node_operator_commission_adder"` + VoterCommissionShare *big.Int `json:"voter_commission_share"` + PdaoCommissionShare *big.Int `json:"pdao_commission_share"` + } + + 
MegapoolRevenueSplitTimeWeightedAverages struct { + NodeShare *big.Int `json:"node_share"` + VoterShare *big.Int `json:"voter_share"` + PdaoShare *big.Int `json:"pdao_share"` + } } // Create a snapshot of all of the network's details @@ -136,6 +150,16 @@ func NewNetworkDetails(rp *rocketpool.RocketPool, contracts *NetworkContracts) ( contracts.Multicaller.AddCall(contracts.RocketDAOProtocolSettingsNetwork, &pricesSubmissionFrequency, "getSubmitPricesFrequency") contracts.Multicaller.AddCall(contracts.RocketDAOProtocolSettingsNetwork, &balancesSubmissionFrequency, "getSubmitBalancesFrequency") + // Saturn + contracts.Multicaller.AddCall(contracts.RocketDAOProtocolSettingsNetwork, &details.MegapoolRevenueSplitSettings.NodeOperatorCommissionShare, "getNodeShare") + contracts.Multicaller.AddCall(contracts.RocketDAOProtocolSettingsNetwork, &details.MegapoolRevenueSplitSettings.NodeOperatorCommissionAdder, "getNodeShareSecurityCouncilAdder") + contracts.Multicaller.AddCall(contracts.RocketDAOProtocolSettingsNetwork, &details.MegapoolRevenueSplitSettings.VoterCommissionShare, "getVoterShare") + contracts.Multicaller.AddCall(contracts.RocketDAOProtocolSettingsNetwork, &details.MegapoolRevenueSplitSettings.PdaoCommissionShare, "getProtocolDAOShare") + + contracts.Multicaller.AddCall(contracts.RocketNetworkRevenues, &details.MegapoolRevenueSplitTimeWeightedAverages.NodeShare, "getCurrentNodeShare") + contracts.Multicaller.AddCall(contracts.RocketNetworkRevenues, &details.MegapoolRevenueSplitTimeWeightedAverages.VoterShare, "getCurrentVoterShare") + contracts.Multicaller.AddCall(contracts.RocketNetworkRevenues, &details.MegapoolRevenueSplitTimeWeightedAverages.PdaoShare, "getCurrentProtocolDAOShare") + _, err := contracts.Multicaller.FlexibleCall(true, opts) if err != nil { return nil, fmt.Errorf("error executing multicall: %w", err) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 5796c5837..f2b3d3fc9 100644 --- 
a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -15,6 +15,7 @@ import ( "github.com/rocket-pool/smartnode/bindings/rewards" rptypes "github.com/rocket-pool/smartnode/bindings/types" "github.com/rocket-pool/smartnode/bindings/utils/eth" + rpstate "github.com/rocket-pool/smartnode/bindings/utils/state" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" "github.com/rocket-pool/smartnode/shared/services/rewards/fees" @@ -44,7 +45,8 @@ type treeGeneratorImpl_v11 struct { smoothingPoolBalance *big.Int intervalDutiesInfo *IntervalDutiesInfo slotsPerEpoch uint64 - validatorIndexMap map[string]*MinipoolInfo + minipoolValidatorIndexMap map[string]*MinipoolInfo + megapoolValidatorIndexMap map[string]*MegapoolInfo elStartTime time.Time elEndTime time.Time validNetworkCache map[uint64]bool @@ -53,6 +55,8 @@ type treeGeneratorImpl_v11 struct { beaconConfig beacon.Eth2Config validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus totalAttestationScore *big.Int + totalVoterScore *big.Int + totalPdaoScore *big.Int successfulAttestations uint64 genesisTime time.Time invalidNetworkNodes map[common.Address]uint64 @@ -85,15 +89,17 @@ func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint NetworkRewards: ssz_types.NetworkRewards{}, NodeRewards: ssz_types.NodeRewards{}, }, - validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, - validatorIndexMap: map[string]*MinipoolInfo{}, - elSnapshotHeader: elSnapshotHeader, - snapshotEnd: snapshotEnd, - log: log, - logPrefix: logPrefix, - totalAttestationScore: big.NewInt(0), - networkState: state, - invalidNetworkNodes: map[common.Address]uint64{}, + validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, + minipoolValidatorIndexMap: map[string]*MinipoolInfo{}, + elSnapshotHeader: elSnapshotHeader, + snapshotEnd: snapshotEnd, + log: log, + logPrefix: 
logPrefix, + totalAttestationScore: big.NewInt(0), + totalVoterScore: big.NewInt(0), + totalPdaoScore: big.NewInt(0), + networkState: state, + invalidNetworkNodes: map[common.Address]uint64{}, minipoolPerformanceFile: &MinipoolPerformanceFile_v2{ Index: index, MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, @@ -530,6 +536,44 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) r.successfulAttestations++ } + + // Repeat, for megapools + if nodeInfo.Megapools != nil { + for _, megapool := range nodeInfo.Megapools { + for _, validator := range megapool.Validators { + details := r.networkState.MegapoolDetails[megapool.Address] + bond := details.GetMegapoolBondNormalized() + nodeFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.NodeShare + nodeFeeAdder := r.networkState.NetworkDetails.MegapoolRevenueSplitSettings.NodeOperatorCommissionAdder + voterFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.VoterShare + pdaoFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.PdaoShare + + effectiveNodeFee := big.NewInt(0).Add(nodeFee, nodeFeeAdder) + effectiveVoterFee := big.NewInt(0).Sub(voterFee, nodeFeeAdder) + + megapoolScore := big.NewInt(0).Sub(oneEth, effectiveNodeFee) // 1 - nodeFee + megapoolScore.Mul(megapoolScore, bond) // Multiply by bond + megapoolScore.Div(megapoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + megapoolScore.Add(megapoolScore, effectiveNodeFee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the megapool's score and the total score + validator.AttestationScore.Add(&validator.AttestationScore.Int, megapoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, megapoolScore) + + voterScore := big.NewInt(0).Sub(oneEth, effectiveVoterFee) // 1 - voterFee + voterScore.Mul(voterScore, bond) // Multiply by bond + voterScore.Div(voterScore, thirtyTwoEth) // Divide 
by 32 to get the bond as a fraction of a total megapool + voterScore.Add(voterScore, effectiveVoterFee) // Total = fee + (bond/32)(1 - fee) + r.totalVoterScore.Add(r.totalVoterScore, voterScore) + + pdaoScore := big.NewInt(0).Sub(oneEth, pdaoFee) // 1 - pdaoFee + pdaoScore.Mul(pdaoScore, bond) // Multiply by bond + pdaoScore.Div(pdaoScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + pdaoScore.Add(pdaoScore, pdaoFee) // Total = fee + (bond/32)(1 - fee) + r.totalPdaoScore.Add(r.totalPdaoScore, pdaoScore) + } + } + } } } } @@ -787,8 +831,13 @@ func (r *treeGeneratorImpl_v11) processAttestationsBalancesAndWithdrawalsForInte return err } + err = r.createMegapoolIndexMap() + if err != nil { + return err + } + // Check all of the attestations for each epoch - r.log.Printlnf("%s Checking participation of %d minipools for epochs %d to %d", r.logPrefix, len(r.validatorIndexMap), startEpoch, endEpoch) + r.log.Printlnf("%s Checking participation of %d minipools and %d megapool validators for epochs %d to %d", r.logPrefix, len(r.minipoolValidatorIndexMap), len(r.megapoolValidatorIndexMap), startEpoch, endEpoch) r.log.Printlnf("%s NOTE: this will take a long time, progress is reported every 100 epochs", r.logPrefix) epochsDone := 0 @@ -859,7 +908,7 @@ func (r *treeGeneratorImpl_v11) processEpoch(duringInterval bool, epoch uint64) for _, withdrawal := range beaconBlock.Withdrawals { // Ignore non-RP validators - mpi, exists := r.validatorIndexMap[withdrawal.ValidatorIndex] + mpi, exists := r.minipoolValidatorIndexMap[withdrawal.ValidatorIndex] if !exists { continue } @@ -948,7 +997,7 @@ func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.Attestat blockTime := r.genesisTime.Add(time.Second * time.Duration(r.networkState.BeaconConfig.SecondsPerSlot*attestation.SlotIndex)) // Check if each RP validator attested successfully - for position, validator := range rpCommittee.Positions { + for position, positionInfo := range 
rpCommittee.Positions { if !attestation.ValidatorAttested(committeeIndex, position, slotInfo.CommitteeSizes) { continue } @@ -961,10 +1010,10 @@ func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.Attestat if len(slotInfo.Committees) == 0 { delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex) } - delete(validator.MissingAttestationSlots, attestation.SlotIndex) + positionInfo.DeleteMissingAttestationSlot(attestation.SlotIndex) // Check if this minipool was opted into the SP for this block - nodeDetails := r.nodeDetails[validator.Node.Index] + nodeDetails := positionInfo.GetNodeDetails() if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) { // Not opted in continue @@ -974,25 +1023,69 @@ func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.Attestat _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake) // Mark this duty as completed - validator.CompletedAttestations[attestation.SlotIndex] = true + positionInfo.MarkAttestationCompleted(attestation.SlotIndex) - // Get the pseudoscore for this attestation - details := r.networkState.MinipoolDetailsByAddress[validator.Address] - bond, fee := details.GetMinipoolBondAndNodeFee(blockTime) + if positionInfo.MinipoolInfo != nil { + validator := positionInfo.MinipoolInfo - if r.rewardsFile.RulesetVersion >= 10 { - fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + // Get the pseudoscore for this attestation + details := r.networkState.MinipoolDetailsByAddress[validator.Address] + bond, fee := details.GetMinipoolBondAndNodeFee(blockTime) + + if r.rewardsFile.RulesetVersion >= 10 { + fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + } + + minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee + minipoolScore.Mul(minipoolScore, bond) // Multiply by bond + minipoolScore.Div(minipoolScore, thirtyTwoEth) // Divide by 32 to get the bond as 
a fraction of a total validator + minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the minipool's score and the total score + validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) + r.successfulAttestations++ + continue } - minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee - minipoolScore.Mul(minipoolScore, bond) // Multiply by bond - minipoolScore.Div(minipoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total validator - minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + megapool := positionInfo.Megapool + validator := megapool.GetValidator() - // Add it to the minipool's score and the total score - validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore) - r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) - r.successfulAttestations++ + // Get the pseudoscore for this attestation + details := r.networkState.MegapoolDetails[megapool.Info.Address] + bond := details.GetMegapoolBondNormalized() + nodeFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.NodeShare + // The node fee adder is added to nodeFee and deducted from voter fee + nodeFeeAdder := r.networkState.NetworkDetails.MegapoolRevenueSplitSettings.NodeOperatorCommissionAdder + voterFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.VoterShare + pdaoFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.PdaoShare + + effectiveNodeFee := big.NewInt(0).Add(nodeFee, nodeFeeAdder) + effectiveVoterFee := big.NewInt(0).Sub(voterFee, nodeFeeAdder) + + // Calculate the pseudoscore for this attestation + megapoolScore := big.NewInt(0).Sub(oneEth, effectiveNodeFee) // 1 - nodeFee + megapoolScore.Mul(megapoolScore, bond) // Multiply by bond + megapoolScore.Div(megapoolScore, thirtyTwoEth) // Divide by 32 to get 
the bond as a fraction of a total megapool + megapoolScore.Add(megapoolScore, effectiveNodeFee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the megapool's score and the total score + validator.AttestationScore.Add(&validator.AttestationScore.Int, megapoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, megapoolScore) + + // Calculate the voter pseudoscore for this attestation + voterScore := big.NewInt(0).Sub(oneEth, effectiveVoterFee) // 1 - voterFee + voterScore.Mul(voterScore, bond) // Multiply by bond + voterScore.Div(voterScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + voterScore.Add(voterScore, effectiveVoterFee) // Total = fee + (bond/32)(1 - fee) + r.totalVoterScore.Add(r.totalVoterScore, voterScore) + + // Calculate the pdao pseudoscore for this attestation + pdaoScore := big.NewInt(0).Sub(oneEth, pdaoFee) // 1 - pdaoFee + pdaoScore.Mul(pdaoScore, bond) // Multiply by bond + pdaoScore.Div(pdaoScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + pdaoScore.Add(pdaoScore, pdaoFee) // Total = fee + (bond/32)(1 - fee) + r.totalPdaoScore.Add(r.totalPdaoScore, pdaoScore) } } } @@ -1027,16 +1120,23 @@ func (r *treeGeneratorImpl_v11) getDutiesForEpoch(committees beacon.Committees) slotInfo.CommitteeSizes[committeeIndex] = committees.ValidatorCount(idx) // Check if there are any RP validators in this committee - rpValidators := map[int]*MinipoolInfo{} + rpValidators := map[int]*PositionInfo{} for position, validator := range committees.Validators(idx) { - minipoolInfo, exists := r.validatorIndexMap[validator] - if !exists { + minipoolInfo, miniExists := r.minipoolValidatorIndexMap[validator] + megapoolInfo, megaExists := r.megapoolValidatorIndexMap[validator] + if !miniExists && !megaExists { // This isn't an RP validator, so ignore it continue } - // Check if this minipool was opted into the SP for this block - nodeDetails := 
r.networkState.NodeDetailsByAddress[minipoolInfo.Node.Address] + // Check if this validator was opted into the SP for this block + var nodeDetails *rpstate.NativeNodeDetails + if miniExists { + nodeDetails = r.networkState.NodeDetailsByAddress[minipoolInfo.Node.Address] + } else { + nodeDetails = r.networkState.NodeDetailsByAddress[megapoolInfo.Node.Address] + } + isOptedIn := nodeDetails.SmoothingPoolRegistrationState spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0) if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it @@ -1044,16 +1144,42 @@ func (r *treeGeneratorImpl_v11) getDutiesForEpoch(committees beacon.Committees) continue } - // Check if this minipool was in the `staking` state during this time - mpd := r.networkState.MinipoolDetailsByAddress[minipoolInfo.Address] - statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0) - if mpd.Status != rptypes.Staking || blockTime.Sub(statusChangeTime) < 0 { + // Check if this validator was in the `staking` state during this time + if miniExists { + mpd := r.networkState.MinipoolDetailsByAddress[minipoolInfo.Address] + statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0) + if mpd.Status != rptypes.Staking || blockTime.Sub(statusChangeTime) < 0 { + continue + } + + // This was a legal RP validator opted into the SP during this slot so add it + rpValidators[position] = &PositionInfo{ + MinipoolInfo: minipoolInfo, + } + minipoolInfo.MissingAttestationSlots[slotIndex] = true + continue + } + megapoolInfo, ok := r.megapoolValidatorIndexMap[validator] + if !ok { + return fmt.Errorf("megapool not found indexed by validator %s", validator) + } + + validatorInfo, exists := megapoolInfo.ValidatorIndexMap[validator] + if !exists { + return fmt.Errorf("validator %s not found indexed in megapool %s", validator, megapoolInfo.Address.Hex()) + } + + if !validatorInfo.NativeValidatorInfo.ValidatorInfo.Staked { continue } - // 
This was a legal RP validator opted into the SP during this slot so add it - rpValidators[position] = minipoolInfo - minipoolInfo.MissingAttestationSlots[slotIndex] = true + rpValidators[position] = &PositionInfo{ + Megapool: &MegapoolPositionInfo{ + Info: megapoolInfo, + ValidatorIndex: validator, + }, + } + validatorInfo.MissingAttestationSlots[slotIndex] = true } // If there are some RP validators, add this committee to the map @@ -1069,58 +1195,115 @@ func (r *treeGeneratorImpl_v11) getDutiesForEpoch(committees beacon.Committees) } +// Maps all megapools to their validator indices and creates map of validator indices to megapool info +func (r *treeGeneratorImpl_v11) createMegapoolIndexMap() error { + + // Get the status for all uncached megapool validators and add them to the cache + r.megapoolValidatorIndexMap = map[string]*MegapoolInfo{} + for _, details := range r.nodeDetails { + if !details.IsEligible { + continue + } + for _, megapoolInfo := range details.Megapools { + for _, validatorInfo := range megapoolInfo.Validators { + status, exists := r.networkState.MegapoolValidatorDetails[validatorInfo.Pubkey] + if !exists { + validatorInfo.WasActive = false + continue + } + + switch status.Status { + + case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: + // Remove megapool validators that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: megapool %s (index %s, pubkey %s) was in state %s; removing it", megapoolInfo.Address.Hex(), status.Index, validatorInfo.Pubkey.Hex(), string(status.Status)) + validatorInfo.WasActive = false + default: + // Get the validator index + validatorInfo.Index = status.Index + r.megapoolValidatorIndexMap[validatorInfo.Index] = megapoolInfo + + // Get the validator's activation start and end slots + + // Get the validator's activation start and end slots + startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch + endSlot := status.ExitEpoch * 
r.beaconConfig.SlotsPerEpoch + + // Verify this megapool has already started + if status.ActivationEpoch == FarEpoch { + //r.log.Printlnf("NOTE: megapool %s hasn't been scheduled for activation yet; removing it", megapoolInfo.Address.Hex()) + validatorInfo.WasActive = false + } else if startSlot > r.rewardsFile.ConsensusEndBlock { + //r.log.Printlnf("NOTE: megapool %s activates on slot %d which is after interval end %d; removing it", megapoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) + validatorInfo.WasActive = false + } + + // Check if the megapool exited before this interval + if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { + //r.log.Printlnf("NOTE: megapool %s exited on slot %d which was before interval start %d; removing it", megapoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) + validatorInfo.WasActive = false + } + } + } + } + } + + return nil +} + // Maps all minipools to their validator indices and creates a map of indices to minipool info func (r *treeGeneratorImpl_v11) createMinipoolIndexMap() error { // Get the status for all uncached minipool validators and add them to the cache - r.validatorIndexMap = map[string]*MinipoolInfo{} + r.minipoolValidatorIndexMap = map[string]*MinipoolInfo{} for _, details := range r.nodeDetails { - if details.IsEligible { - for _, minipoolInfo := range details.Minipools { - status, exists := r.networkState.MinipoolValidatorDetails[minipoolInfo.ValidatorPubkey] - if !exists { - // Remove minipools that don't have indices yet since they're not actually viable - //r.log.Printlnf("NOTE: minipool %s (pubkey %s) didn't exist at this slot; removing it", minipoolInfo.Address.Hex(), minipoolInfo.ValidatorPubkey.Hex()) + if !details.IsEligible { + continue + } + for _, minipoolInfo := range details.Minipools { + status, exists := r.networkState.MinipoolValidatorDetails[minipoolInfo.ValidatorPubkey] + if !exists { + // Remove minipools that don't have indices yet 
since they're not actually viable + //r.log.Printlnf("NOTE: minipool %s (pubkey %s) didn't exist at this slot; removing it", minipoolInfo.Address.Hex(), minipoolInfo.ValidatorPubkey.Hex()) + minipoolInfo.WasActive = false + continue + } + + switch status.Status { + case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: + // Remove minipools that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: minipool %s (index %s, pubkey %s) was in state %s; removing it", minipoolInfo.Address.Hex(), status.Index, minipoolInfo.ValidatorPubkey.Hex(), string(status.Status)) + minipoolInfo.WasActive = false + default: + // Get the validator index + minipoolInfo.ValidatorIndex = status.Index + r.minipoolValidatorIndexMap[minipoolInfo.ValidatorIndex] = minipoolInfo + + // Get the validator's activation start and end slots + startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch + endSlot := status.ExitEpoch * r.beaconConfig.SlotsPerEpoch + + // Verify this minipool has already started + if status.ActivationEpoch == FarEpoch { + //r.log.Printlnf("NOTE: minipool %s hasn't been scheduled for activation yet; removing it", minipoolInfo.Address.Hex()) minipoolInfo.WasActive = false - } else { - switch status.Status { - case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: - // Remove minipools that don't have indices yet since they're not actually viable - //r.log.Printlnf("NOTE: minipool %s (index %s, pubkey %s) was in state %s; removing it", minipoolInfo.Address.Hex(), status.Index, minipoolInfo.ValidatorPubkey.Hex(), string(status.Status)) - minipoolInfo.WasActive = false - default: - // Get the validator index - minipoolInfo.ValidatorIndex = status.Index - r.validatorIndexMap[minipoolInfo.ValidatorIndex] = minipoolInfo - - // Get the validator's activation start and end slots - startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch - endSlot := status.ExitEpoch * 
r.beaconConfig.SlotsPerEpoch - - // Verify this minipool has already started - if status.ActivationEpoch == FarEpoch { - //r.log.Printlnf("NOTE: minipool %s hasn't been scheduled for activation yet; removing it", minipoolInfo.Address.Hex()) - minipoolInfo.WasActive = false - continue - } else if startSlot > r.rewardsFile.ConsensusEndBlock { - //r.log.Printlnf("NOTE: minipool %s activates on slot %d which is after interval end %d; removing it", minipoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) - minipoolInfo.WasActive = false - } + continue + } else if startSlot > r.rewardsFile.ConsensusEndBlock { + //r.log.Printlnf("NOTE: minipool %s activates on slot %d which is after interval end %d; removing it", minipoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) + minipoolInfo.WasActive = false + } - // Check if the minipool exited before this interval - if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { - //r.log.Printlnf("NOTE: minipool %s exited on slot %d which was before interval start %d; removing it", minipoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) - minipoolInfo.WasActive = false - continue - } - } + // Check if the minipool exited before this interval + if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { + //r.log.Printlnf("NOTE: minipool %s exited on slot %d which was before interval start %d; removing it", minipoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) + minipoolInfo.WasActive = false + continue } } } } return nil - } // Get the details for every node that was opted into the Smoothing Pool for at least some portion of this interval @@ -1179,12 +1362,10 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { // This minipool is below the penalty count, so include it nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ - Address: mpd.MinipoolAddress, - ValidatorPubkey: mpd.Pubkey, - Node: 
nodeDetails, - Fee: nativeMinipoolDetails.NodeFee, - //MissedAttestations: 0, - //GoodAttestations: 0, + Address: mpd.MinipoolAddress, + ValidatorPubkey: mpd.Pubkey, + Node: nodeDetails, + Fee: nativeMinipoolDetails.NodeFee, MissingAttestationSlots: map[uint64]bool{}, CompletedAttestations: map[uint64]bool{}, WasActive: true, @@ -1200,20 +1381,43 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { validators := r.networkState.MegapoolToPubkeysMap[megapoolAddress] mpInfo := &MegapoolInfo{ - Address: megapoolAddress, + Address: megapoolAddress, + Node: nodeDetails, + Validators: []*MegapoolValidatorInfo{}, + ValidatorIndexMap: make(map[string]*MegapoolValidatorInfo), } for _, validator := range validators { - mpInfo.Validators = append(mpInfo.Validators, &MegapoolValidatorInfo{ + status, exists := r.networkState.MegapoolValidatorDetails[validator] + if !exists { + continue + } + + nativeValidatorInfo, exists := r.networkState.MegapoolValidatorInfo[validator] + if !exists { + continue + } + + v := &MegapoolValidatorInfo{ Pubkey: validator, + Index: status.Index, MissingAttestationSlots: map[uint64]bool{}, AttestationScore: NewQuotedBigInt(0), - }) + CompletedAttestations: map[uint64]bool{}, + NativeValidatorInfo: nativeValidatorInfo, + } + + mpInfo.Validators = append(mpInfo.Validators, v) + mpInfo.ValidatorIndexMap[v.Index] = v } + nodeDetails.Megapools = append(nodeDetails.Megapools, mpInfo) + // The node is eligible if it has a megapool or minipools + nodeDetails.IsEligible = len(validators) > 0 || len(nodeDetails.Minipools) > 0 + } else { + // The node is eligible if it has minipools + nodeDetails.IsEligible = len(nodeDetails.Minipools) > 0 } - - nodeDetails.IsEligible = len(nodeDetails.Minipools) > 0 r.nodeDetails[iterationIndex] = nodeDetails return nil }) diff --git a/shared/services/rewards/generator-impl-v8.go b/shared/services/rewards/generator-impl-v8.go index 5578cb32a..0b11beecf 100644 --- 
a/shared/services/rewards/generator-impl-v8.go +++ b/shared/services/rewards/generator-impl-v8.go @@ -860,7 +860,7 @@ func (r *treeGeneratorImpl_v8) checkDutiesForSlot(attestations []beacon.Attestat blockTime := r.genesisTime.Add(time.Second * time.Duration(r.networkState.BeaconConfig.SecondsPerSlot*attestation.SlotIndex)) // Check if each RP validator attested successfully - for position, validator := range rpCommittee.Positions { + for position, positionInfo := range rpCommittee.Positions { if !attestation.ValidatorAttested(committeeIndex, position, slotInfo.CommitteeSizes) { continue } @@ -873,20 +873,20 @@ func (r *treeGeneratorImpl_v8) checkDutiesForSlot(attestations []beacon.Attestat if len(slotInfo.Committees) == 0 { delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex) } - delete(validator.MissingAttestationSlots, attestation.SlotIndex) + positionInfo.DeleteMissingAttestationSlot(attestation.SlotIndex) // Check if this minipool was opted into the SP for this block - nodeDetails := r.nodeDetails[validator.Node.Index] + nodeDetails := positionInfo.GetNodeDetails() if blockTime.Sub(nodeDetails.OptInTime) < 0 || nodeDetails.OptOutTime.Sub(blockTime) < 0 { // Not opted in continue } // Mark this duty as completed - validator.CompletedAttestations[attestation.SlotIndex] = true + positionInfo.MarkAttestationCompleted(attestation.SlotIndex) // Get the pseudoscore for this attestation - details := r.networkState.MinipoolDetailsByAddress[validator.Address] + details := r.networkState.MinipoolDetailsByAddress[positionInfo.MinipoolInfo.Address] bond, fee := r.getMinipoolBondAndNodeFee(details, blockTime) minipoolScore := big.NewInt(0).Sub(one, fee) // 1 - fee minipoolScore.Mul(minipoolScore, bond) // Multiply by bond @@ -894,7 +894,7 @@ func (r *treeGeneratorImpl_v8) checkDutiesForSlot(attestations []beacon.Attestat minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) // Add it to the minipool's score and the total score - 
validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore) + positionInfo.MinipoolInfo.AttestationScore.Add(&positionInfo.MinipoolInfo.AttestationScore.Int, minipoolScore) r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) r.successfulAttestations++ } @@ -931,7 +931,7 @@ func (r *treeGeneratorImpl_v8) getDutiesForEpoch(committees beacon.Committees) e slotInfo.CommitteeSizes[committeeIndex] = committees.ValidatorCount(idx) // Check if there are any RP validators in this committee - rpValidators := map[int]*MinipoolInfo{} + rpValidators := map[int]*PositionInfo{} for position, validator := range committees.Validators(idx) { minipoolInfo, exists := r.validatorIndexMap[validator] if !exists { @@ -956,7 +956,9 @@ func (r *treeGeneratorImpl_v8) getDutiesForEpoch(committees beacon.Committees) e } // This was a legal RP validator opted into the SP during this slot so add it - rpValidators[position] = minipoolInfo + rpValidators[position] = &PositionInfo{ + MinipoolInfo: minipoolInfo, + } minipoolInfo.MissingAttestationSlots[slotIndex] = true } diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go index ea59398b4..bd5038990 100644 --- a/shared/services/rewards/generator-impl-v9-v10.go +++ b/shared/services/rewards/generator-impl-v9-v10.go @@ -957,7 +957,7 @@ func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.Attes blockTime := r.genesisTime.Add(time.Second * time.Duration(r.networkState.BeaconConfig.SecondsPerSlot*attestation.SlotIndex)) // Check if each RP validator attested successfully - for position, validator := range rpCommittee.Positions { + for position, positionInfo := range rpCommittee.Positions { if !attestation.ValidatorAttested(committeeIndex, position, slotInfo.CommitteeSizes) { continue } @@ -970,10 +970,10 @@ func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.Attes if len(slotInfo.Committees) == 0 { 
delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex) } - delete(validator.MissingAttestationSlots, attestation.SlotIndex) + positionInfo.DeleteMissingAttestationSlot(attestation.SlotIndex) // Check if this minipool was opted into the SP for this block - nodeDetails := r.nodeDetails[validator.Node.Index] + nodeDetails := positionInfo.GetNodeDetails() if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) { // Not opted in continue @@ -983,10 +983,10 @@ func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.Attes _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake) // Mark this duty as completed - validator.CompletedAttestations[attestation.SlotIndex] = true + positionInfo.MarkAttestationCompleted(attestation.SlotIndex) // Get the pseudoscore for this attestation - details := r.networkState.MinipoolDetailsByAddress[validator.Address] + details := r.networkState.MinipoolDetailsByAddress[positionInfo.MinipoolInfo.Address] bond, fee := details.GetMinipoolBondAndNodeFee(blockTime) if r.rewardsFile.RulesetVersion >= 10 { @@ -999,7 +999,7 @@ func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.Attes minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) // Add it to the minipool's score and the total score - validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore) + positionInfo.MinipoolInfo.AttestationScore.Add(&positionInfo.MinipoolInfo.AttestationScore.Int, minipoolScore) r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) r.successfulAttestations++ } @@ -1036,7 +1036,7 @@ func (r *treeGeneratorImpl_v9_v10) getDutiesForEpoch(committees beacon.Committee slotInfo.CommitteeSizes[committeeIndex] = committees.ValidatorCount(idx) // Check if there are any RP validators in this committee - rpValidators := map[int]*MinipoolInfo{} + rpValidators := 
map[int]*PositionInfo{} for position, validator := range committees.Validators(idx) { minipoolInfo, exists := r.validatorIndexMap[validator] if !exists { @@ -1061,7 +1061,9 @@ func (r *treeGeneratorImpl_v9_v10) getDutiesForEpoch(committees beacon.Committee } // This was a legal RP validator opted into the SP during this slot so add it - rpValidators[position] = minipoolInfo + rpValidators[position] = &PositionInfo{ + MinipoolInfo: minipoolInfo, + } minipoolInfo.MissingAttestationSlots[slotIndex] = true } diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 33a21f313..ac12f0c88 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/smartnode/bindings/megapool" "github.com/rocket-pool/smartnode/bindings/rewards" "github.com/rocket-pool/smartnode/bindings/types" "github.com/rocket-pool/smartnode/shared/services/beacon" @@ -209,13 +210,20 @@ type MegapoolValidatorInfo struct { MissedAttestations uint64 `json:"-"` GoodAttestations uint64 `json:"-"` MissingAttestationSlots map[uint64]bool `json:"missingAttestationSlots"` + WasActive bool `json:"-"` AttestationScore *QuotedBigInt `json:"attestationScore"` + CompletedAttestations map[uint64]bool `json:"-"` AttestationCount int `json:"attestationCount"` + + NativeValidatorInfo *megapool.ValidatorInfoFromGlobalIndex `json:"nativeValidatorInfo"` } type MegapoolInfo struct { - Address common.Address `json:"address"` - Validators []*MegapoolValidatorInfo + Address common.Address `json:"address"` + Node *NodeSmoothingDetails `json:"node"` + Validators []*MegapoolValidatorInfo `json:"validators"` + // Indexes over Validators slice above + ValidatorIndexMap map[string]*MegapoolValidatorInfo `json:"-"` } type MinipoolInfo struct { @@ -253,9 +261,54 @@ type SlotInfo struct 
{ CommitteeSizes map[uint64]int } +// MegapoolPositionInfo is a wrapper around MegapoolInfo with additional indexing and functionality +type MegapoolPositionInfo struct { + Info *MegapoolInfo + ValidatorIndex string +} + +func (m *MegapoolPositionInfo) GetValidator() *MegapoolValidatorInfo { + return m.Info.ValidatorIndexMap[m.ValidatorIndex] +} + +// PositionInfo is a union of MinipoolInfo and MegapoolInfo +type PositionInfo struct { + MinipoolInfo *MinipoolInfo + Megapool *MegapoolPositionInfo +} + +func (m *MegapoolPositionInfo) GetValidatorInfo() *MegapoolValidatorInfo { + return m.Info.ValidatorIndexMap[m.ValidatorIndex] +} + +func (p *PositionInfo) DeleteMissingAttestationSlot(slotIndex uint64) { + if p.MinipoolInfo != nil { + delete(p.MinipoolInfo.MissingAttestationSlots, slotIndex) + return + } + validatorInfo := p.Megapool.GetValidator() + delete(validatorInfo.MissingAttestationSlots, slotIndex) +} + +func (p *PositionInfo) GetNodeDetails() *NodeSmoothingDetails { + if p.MinipoolInfo != nil { + return p.MinipoolInfo.Node + } + return p.Megapool.Info.Node +} + +func (p *PositionInfo) MarkAttestationCompleted(slotIndex uint64) { + if p.MinipoolInfo != nil { + p.MinipoolInfo.CompletedAttestations[slotIndex] = true + return + } + validatorInfo := p.Megapool.GetValidator() + validatorInfo.CompletedAttestations[slotIndex] = true +} + type CommitteeInfo struct { Index uint64 - Positions map[int]*MinipoolInfo + Positions map[int]*PositionInfo } // Details about a node for the Smoothing Pool diff --git a/shared/services/state/network-state.go b/shared/services/state/network-state.go index f9f2826b8..59a3c6c0f 100644 --- a/shared/services/state/network-state.go +++ b/shared/services/state/network-state.go @@ -100,6 +100,8 @@ type NetworkState struct { MinipoolValidatorDetails ValidatorDetailsMap `json:"validator_details"` MegapoolValidatorDetails ValidatorDetailsMap `json:"megapool_validator_details"` + MegapoolValidatorInfo 
map[types.ValidatorPubkey]*megapool.ValidatorInfoFromGlobalIndex `json:"-"` + // Oracle DAO details OracleDaoMemberDetails []rpstate.OracleDaoMemberDetails `json:"oracle_dao_member_details"` @@ -263,12 +265,13 @@ func (m *NetworkStateManager) createNetworkState(slotNumber uint64) (*NetworkSta megapoolValidatorPubkeys := make([]types.ValidatorPubkey, 0, len(state.MegapoolValidatorGlobalIndex)) // Iterate over the megapool validators to add their pubkey to the list of pubkeys megapoolAddressMap := make(map[common.Address][]types.ValidatorPubkey) + megapoolValidatorInfo := make(map[types.ValidatorPubkey]*megapool.ValidatorInfoFromGlobalIndex) for _, validator := range state.MegapoolValidatorGlobalIndex { // Add the megapool address to a set if len(validator.Pubkey) > 0 { // TODO CHECK validators without a pubkey megapoolAddressMap[validator.MegapoolAddress] = append(megapoolAddressMap[validator.MegapoolAddress], types.ValidatorPubkey(validator.Pubkey)) megapoolValidatorPubkeys = append(megapoolValidatorPubkeys, types.ValidatorPubkey(validator.Pubkey)) - + megapoolValidatorInfo[types.ValidatorPubkey(validator.Pubkey)] = &validator } } state.MegapoolToPubkeysMap = megapoolAddressMap @@ -279,6 +282,7 @@ func (m *NetworkStateManager) createNetworkState(slotNumber uint64) (*NetworkSta return nil, err } state.MegapoolValidatorDetails = statusMap + state.MegapoolValidatorInfo = megapoolValidatorInfo // initialize state.MegapoolDetails state.MegapoolDetails = make(map[common.Address]rpstate.NativeMegapoolDetails) @@ -615,6 +619,7 @@ func (s *NetworkState) CalculateNodeWeights() (map[common.Address]*big.Int, *big func (s *NetworkState) GetEligibleBorrowedEth(node *rpstate.NativeNodeDetails) *big.Int { eligibleBorrowedEth := big.NewInt(0) + intervalEndEpoch := s.BeaconSlotNumber / s.BeaconConfig.SlotsPerEpoch for _, mpd := range s.MinipoolDetailsByNode[node.NodeAddress] { @@ -630,8 +635,6 @@ func (s *NetworkState) GetEligibleBorrowedEth(node *rpstate.NativeNodeDetails) * 
continue } - intervalEndEpoch := s.BeaconSlotNumber / s.BeaconConfig.SlotsPerEpoch - // Already exited if validatorStatus.ExitEpoch <= intervalEndEpoch { //s.logLine("NOTE: Minipool %s exited on epoch %d which is not after interval epoch %d so it's not eligible for RPL rewards", mpd.MinipoolAddress.Hex(), validatorStatus.ExitEpoch, intervalEndEpoch) @@ -641,6 +644,31 @@ func (s *NetworkState) GetEligibleBorrowedEth(node *rpstate.NativeNodeDetails) * // It's eligible, so add up the borrowed and bonded amounts eligibleBorrowedEth.Add(eligibleBorrowedEth, mpd.UserDepositBalance) } + + if node.MegapoolDeployed { + megapool := s.MegapoolDetails[node.MegapoolAddress] + validators := s.MegapoolToPubkeysMap[node.MegapoolAddress] + activeValidators := 0 + for _, validator := range validators { + validatorStatus, exists := s.MegapoolValidatorDetails[validator] + if !exists { + continue + } + + if validatorStatus.ExitEpoch <= intervalEndEpoch { + continue + } + + activeValidators += 1 + } + totalValidators := megapool.ValidatorCount + userCapital := big.NewInt(0).Mul(megapool.UserCapital, big.NewInt(int64(activeValidators))) + // Scale the userCapital by active validators / total validators + userCapital.Mul(userCapital, big.NewInt(int64(activeValidators))) + userCapital.Quo(userCapital, big.NewInt(int64(totalValidators))) + eligibleBorrowedEth.Add(eligibleBorrowedEth, userCapital) + } + return eligibleBorrowedEth } From d62323bf8b2a21f252359f324de3b21eab37c992 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 12:56:16 -0400 Subject: [PATCH 05/33] Remove unused test mock function --- shared/services/rewards/test/mock.go | 49 ---------------------------- 1 file changed, 49 deletions(-) diff --git a/shared/services/rewards/test/mock.go b/shared/services/rewards/test/mock.go index abcd1bff1..af1b0d452 100644 --- a/shared/services/rewards/test/mock.go +++ b/shared/services/rewards/test/mock.go @@ -11,7 +11,6 @@ import ( 
"github.com/rocket-pool/smartnode/bindings/utils/eth" rpstate "github.com/rocket-pool/smartnode/bindings/utils/state" "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/rewards/fees" "github.com/rocket-pool/smartnode/shared/services/state" ) @@ -49,54 +48,6 @@ func (h *MockHistory) GetNodeAddress() common.Address { var oneEth = big.NewInt(1000000000000000000) var thirtyTwoEth = big.NewInt(0).Mul(oneEth, big.NewInt(32)) -func (h *MockHistory) GetMinipoolAttestationScoreAndCount(address common.Address, state *state.NetworkState) (*big.Int, uint64) { - out := big.NewInt(0) - mpi := state.MinipoolDetailsByAddress[address] - nodeDetails := state.NodeDetailsByAddress[mpi.NodeAddress] - - // Check every slot in the history - count := uint64(0) - for slot := h.GetConsensusStartBlock(); slot <= h.GetConsensusEndBlock(); slot++ { - // Get the time at the slot - blockTime := h.BeaconConfig.GetSlotTime(slot) - // Check the status of the minipool at this time - if mpi.Status != types.Staking { - continue - } - if mpi.Finalised { - continue - } - // Check if the minipool was opted in at this time - if !nodeDetails.WasOptedInAt(blockTime) { - continue - } - pubkey := mpi.Pubkey - validator := state.MinipoolValidatorDetails[pubkey] - // Check if the validator was exited before this slot - if validator.ExitEpoch <= h.BeaconConfig.SlotToEpoch(slot) { - continue - } - index := validator.Index - indexInt, _ := strconv.ParseUint(index, 10, 64) - // Count the attestation if index%32 == slot%32 - if indexInt%32 == uint64(slot%32) { - count++ - - bond, fee := mpi.GetMinipoolBondAndNodeFee(blockTime) - // Give the minipool a score according to its fee - eligibleBorrowedEth := state.GetEligibleBorrowedEth(nodeDetails) - _, percentOfBorrowedEth := state.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake) - fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) - minipoolScore := 
big.NewInt(0).Sub(oneEth, fee) // 1 - fee - minipoolScore.Mul(minipoolScore, bond) // Multiply by bond - minipoolScore.Div(minipoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total validator - minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) - out.Add(out, minipoolScore) - } - } - return out, count -} - type MockMinipool struct { Address common.Address Pubkey types.ValidatorPubkey From 98a26bdc1d08058d0771c58d6e23d49601fecb5c Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 13:14:22 -0400 Subject: [PATCH 06/33] Use legacy + megapool rpl stake for saturn --- shared/services/state/network-state.go | 36 ++++++++++---------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/shared/services/state/network-state.go b/shared/services/state/network-state.go index 59a3c6c0f..d95e9a2ff 100644 --- a/shared/services/state/network-state.go +++ b/shared/services/state/network-state.go @@ -570,6 +570,7 @@ func (s *NetworkState) CalculateNodeWeights() (map[common.Address]*big.Int, *big node := node wg.Go(func() error { eligibleBorrowedEth := s.GetEligibleBorrowedEth(&node) + rplStake := s.GetRplStake(&node) // minCollateral := borrowedEth * minCollateralFraction / ratio // NOTE: minCollateralFraction and ratio are both percentages, but multiplying and dividing by them cancels out the need for normalization by eth.EthToWei(1) @@ -578,12 +579,12 @@ func (s *NetworkState) CalculateNodeWeights() (map[common.Address]*big.Int, *big // Calculate the weight nodeWeight := big.NewInt(0) - if node.RplStake.Cmp(minCollateral) == -1 || eligibleBorrowedEth.Sign() <= 0 { + if rplStake.Cmp(minCollateral) == -1 || eligibleBorrowedEth.Sign() <= 0 { weightSlice[i] = nodeWeight return nil } - nodeWeight.Set(s.GetNodeWeight(eligibleBorrowedEth, node.RplStake)) + nodeWeight.Set(s.GetNodeWeight(eligibleBorrowedEth, rplStake)) // Scale the node weight by the participation in the current interval // Get the timestamp 
of the node's registration @@ -617,6 +618,16 @@ func (s *NetworkState) CalculateNodeWeights() (map[common.Address]*big.Int, *big return weights, totalWeight, nil } +func (s *NetworkState) GetRplStake(node *rpstate.NativeNodeDetails) *big.Int { + if !s.IsSaturnDeployed { + return node.RplStake + } + + out := big.NewInt(0).Set(node.LegacyStakedRPL) + out.Add(out, node.MegapoolStakedRPL) + return out +} + func (s *NetworkState) GetEligibleBorrowedEth(node *rpstate.NativeNodeDetails) *big.Int { eligibleBorrowedEth := big.NewInt(0) intervalEndEpoch := s.BeaconSlotNumber / s.BeaconConfig.SlotsPerEpoch @@ -647,26 +658,7 @@ func (s *NetworkState) GetEligibleBorrowedEth(node *rpstate.NativeNodeDetails) * if node.MegapoolDeployed { megapool := s.MegapoolDetails[node.MegapoolAddress] - validators := s.MegapoolToPubkeysMap[node.MegapoolAddress] - activeValidators := 0 - for _, validator := range validators { - validatorStatus, exists := s.MegapoolValidatorDetails[validator] - if !exists { - continue - } - - if validatorStatus.ExitEpoch <= intervalEndEpoch { - continue - } - - activeValidators += 1 - } - totalValidators := megapool.ValidatorCount - userCapital := big.NewInt(0).Mul(megapool.UserCapital, big.NewInt(int64(activeValidators))) - // Scale the userCapital by active validators / total validators - userCapital.Mul(userCapital, big.NewInt(int64(activeValidators))) - userCapital.Quo(userCapital, big.NewInt(int64(totalValidators))) - eligibleBorrowedEth.Add(eligibleBorrowedEth, userCapital) + eligibleBorrowedEth.Add(eligibleBorrowedEth, megapool.UserCapital) } return eligibleBorrowedEth From 830d1e035e371f2c47c20630bebe0e70411bac52 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 13:22:52 -0400 Subject: [PATCH 07/33] devectorize megapools per node --- shared/services/rewards/generator-impl-v11.go | 140 +++++++++--------- shared/services/rewards/types.go | 2 +- 2 files changed, 71 insertions(+), 71 deletions(-) diff --git 
a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index f2b3d3fc9..e9e2a35fd 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -538,40 +538,39 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) } // Repeat, for megapools - if nodeInfo.Megapools != nil { - for _, megapool := range nodeInfo.Megapools { - for _, validator := range megapool.Validators { - details := r.networkState.MegapoolDetails[megapool.Address] - bond := details.GetMegapoolBondNormalized() - nodeFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.NodeShare - nodeFeeAdder := r.networkState.NetworkDetails.MegapoolRevenueSplitSettings.NodeOperatorCommissionAdder - voterFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.VoterShare - pdaoFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.PdaoShare - - effectiveNodeFee := big.NewInt(0).Add(nodeFee, nodeFeeAdder) - effectiveVoterFee := big.NewInt(0).Sub(voterFee, nodeFeeAdder) - - megapoolScore := big.NewInt(0).Sub(oneEth, effectiveNodeFee) // 1 - nodeFee - megapoolScore.Mul(megapoolScore, bond) // Multiply by bond - megapoolScore.Div(megapoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - megapoolScore.Add(megapoolScore, effectiveNodeFee) // Total = fee + (bond/32)(1 - fee) - - // Add it to the megapool's score and the total score - validator.AttestationScore.Add(&validator.AttestationScore.Int, megapoolScore) - r.totalAttestationScore.Add(r.totalAttestationScore, megapoolScore) - - voterScore := big.NewInt(0).Sub(oneEth, effectiveVoterFee) // 1 - voterFee - voterScore.Mul(voterScore, bond) // Multiply by bond - voterScore.Div(voterScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - voterScore.Add(voterScore, effectiveVoterFee) // Total = fee + (bond/32)(1 - 
fee) - r.totalVoterScore.Add(r.totalVoterScore, voterScore) - - pdaoScore := big.NewInt(0).Sub(oneEth, pdaoFee) // 1 - pdaoFee - pdaoScore.Mul(pdaoScore, bond) // Multiply by bond - pdaoScore.Div(pdaoScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - pdaoScore.Add(pdaoScore, pdaoFee) // Total = fee + (bond/32)(1 - fee) - r.totalPdaoScore.Add(r.totalPdaoScore, pdaoScore) - } + if nodeInfo.Megapool != nil { + megapool := nodeInfo.Megapool + for _, validator := range megapool.Validators { + details := r.networkState.MegapoolDetails[megapool.Address] + bond := details.GetMegapoolBondNormalized() + nodeFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.NodeShare + nodeFeeAdder := r.networkState.NetworkDetails.MegapoolRevenueSplitSettings.NodeOperatorCommissionAdder + voterFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.VoterShare + pdaoFee := r.networkState.NetworkDetails.MegapoolRevenueSplitTimeWeightedAverages.PdaoShare + + effectiveNodeFee := big.NewInt(0).Add(nodeFee, nodeFeeAdder) + effectiveVoterFee := big.NewInt(0).Sub(voterFee, nodeFeeAdder) + + megapoolScore := big.NewInt(0).Sub(oneEth, effectiveNodeFee) // 1 - nodeFee + megapoolScore.Mul(megapoolScore, bond) // Multiply by bond + megapoolScore.Div(megapoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + megapoolScore.Add(megapoolScore, effectiveNodeFee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the megapool's score and the total score + validator.AttestationScore.Add(&validator.AttestationScore.Int, megapoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, megapoolScore) + + voterScore := big.NewInt(0).Sub(oneEth, effectiveVoterFee) // 1 - voterFee + voterScore.Mul(voterScore, bond) // Multiply by bond + voterScore.Div(voterScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + voterScore.Add(voterScore, 
effectiveVoterFee) // Total = fee + (bond/32)(1 - fee) + r.totalVoterScore.Add(r.totalVoterScore, voterScore) + + pdaoScore := big.NewInt(0).Sub(oneEth, pdaoFee) // 1 - pdaoFee + pdaoScore.Mul(pdaoScore, bond) // Multiply by bond + pdaoScore.Div(pdaoScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool + pdaoScore.Add(pdaoScore, pdaoFee) // Total = fee + (bond/32)(1 - fee) + r.totalPdaoScore.Add(r.totalPdaoScore, pdaoScore) } } } @@ -1204,45 +1203,46 @@ func (r *treeGeneratorImpl_v11) createMegapoolIndexMap() error { if !details.IsEligible { continue } - for _, megapoolInfo := range details.Megapools { - for _, validatorInfo := range megapoolInfo.Validators { - status, exists := r.networkState.MegapoolValidatorDetails[validatorInfo.Pubkey] - if !exists { - validatorInfo.WasActive = false - continue - } + if details.Megapool == nil { + continue + } + for _, validatorInfo := range details.Megapool.Validators { + status, exists := r.networkState.MegapoolValidatorDetails[validatorInfo.Pubkey] + if !exists { + validatorInfo.WasActive = false + continue + } + + switch status.Status { - switch status.Status { + case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: + // Remove megapool validators that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: megapool %s (index %s, pubkey %s) was in state %s; removing it", megapoolInfo.Address.Hex(), status.Index, validatorInfo.Pubkey.Hex(), string(status.Status)) + validatorInfo.WasActive = false + default: + // Get the validator index + validatorInfo.Index = status.Index + r.megapoolValidatorIndexMap[validatorInfo.Index] = details.Megapool - case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: - // Remove megapool validators that don't have indices yet since they're not actually viable - //r.log.Printlnf("NOTE: megapool %s (index %s, pubkey %s) was in state %s; removing it", megapoolInfo.Address.Hex(), 
status.Index, validatorInfo.Pubkey.Hex(), string(status.Status)) + // Get the validator's activation start and end slots + + // Get the validator's activation start and end slots + startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch + endSlot := status.ExitEpoch * r.beaconConfig.SlotsPerEpoch + + // Verify this megapool has already started + if status.ActivationEpoch == FarEpoch { + //r.log.Printlnf("NOTE: megapool %s hasn't been scheduled for activation yet; removing it", megapoolInfo.Address.Hex()) validatorInfo.WasActive = false - default: - // Get the validator index - validatorInfo.Index = status.Index - r.megapoolValidatorIndexMap[validatorInfo.Index] = megapoolInfo - - // Get the validator's activation start and end slots - - // Get the validator's activation start and end slots - startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch - endSlot := status.ExitEpoch * r.beaconConfig.SlotsPerEpoch - - // Verify this megapool has already started - if status.ActivationEpoch == FarEpoch { - //r.log.Printlnf("NOTE: megapool %s hasn't been scheduled for activation yet; removing it", megapoolInfo.Address.Hex()) - validatorInfo.WasActive = false - } else if startSlot > r.rewardsFile.ConsensusEndBlock { - //r.log.Printlnf("NOTE: megapool %s activates on slot %d which is after interval end %d; removing it", megapoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) - validatorInfo.WasActive = false - } + } else if startSlot > r.rewardsFile.ConsensusEndBlock { + //r.log.Printlnf("NOTE: megapool %s activates on slot %d which is after interval end %d; removing it", megapoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) + validatorInfo.WasActive = false + } - // Check if the megapool exited before this interval - if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { - //r.log.Printlnf("NOTE: megapool %s exited on slot %d which was before interval start %d; removing it", 
megapoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) - validatorInfo.WasActive = false - } + // Check if the megapool exited before this interval + if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { + //r.log.Printlnf("NOTE: megapool %s exited on slot %d which was before interval start %d; removing it", megapoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) + validatorInfo.WasActive = false } } } @@ -1411,7 +1411,7 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { mpInfo.ValidatorIndexMap[v.Index] = v } - nodeDetails.Megapools = append(nodeDetails.Megapools, mpInfo) + nodeDetails.Megapool = mpInfo // The node is eligible if it has a megapool or minipools nodeDetails.IsEligible = len(validators) > 0 || len(nodeDetails.Minipools) > 0 } else { diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index ac12f0c88..e054cbbdc 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -319,7 +319,7 @@ type NodeSmoothingDetails struct { IsOptedIn bool StatusChangeTime time.Time Minipools []*MinipoolInfo - Megapools []*MegapoolInfo + Megapool *MegapoolInfo EligibleSeconds *big.Int StartSlot uint64 EndSlot uint64 From 8da0ac1db3d36e7474760204be1e1f09240928b0 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 13:52:45 -0400 Subject: [PATCH 08/33] Add pending voter share to the network details --- bindings/utils/state/network.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bindings/utils/state/network.go b/bindings/utils/state/network.go index 2b543c509..9e1fd137f 100644 --- a/bindings/utils/state/network.go +++ b/bindings/utils/state/network.go @@ -48,6 +48,7 @@ type NetworkDetails struct { TotalRETHSupply *big.Int `json:"total_reth_supply"` TotalRPLStake *big.Int `json:"total_rpl_stake"` SmoothingPoolBalance *big.Int `json:"smoothing_pool_balance"` + PendingVoterShare *big.Int `json:"pending_voter_share"` 
NodeFee float64 `json:"node_fee"` BalancesBlock uint64 `json:"balances_block"` LatestReportableBalancesBlock uint64 `json:"latest_reportable_balances_block"` @@ -119,6 +120,7 @@ func NewNetworkDetails(rp *rocketpool.RocketPool, contracts *NetworkContracts) ( contracts.Multicaller.AddCall(contracts.RocketRewardsPool, &details.TrustedNodeOperatorRewardsPercent, "getClaimingContractPerc", "rocketClaimTrustedNode") contracts.Multicaller.AddCall(contracts.RocketRewardsPool, &details.ProtocolDaoRewardsPercent, "getClaimingContractPerc", "rocketClaimDAO") contracts.Multicaller.AddCall(contracts.RocketRewardsPool, &details.PendingRPLRewards, "getPendingRPLRewards") + contracts.Multicaller.AddCall(contracts.RocketRewardsPool, &details.PendingVoterShare, "getPendingVoterShare") contracts.Multicaller.AddCall(contracts.RocketDAONodeTrustedSettingsMinipool, &scrubPeriodSeconds, "getScrubPeriod") contracts.Multicaller.AddCall(contracts.RocketDepositPool, &details.DepositPoolBalance, "getBalance") contracts.Multicaller.AddCall(contracts.RocketDepositPool, &details.DepositPoolExcess, "getExcessBalance") From a0f511630bc24fff577c7795be5eb5b6883a9002 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 14:05:39 -0400 Subject: [PATCH 09/33] Add active validators in megapools to epsilon --- shared/services/rewards/generator-impl-v11.go | 41 ++++++++++++++++--- shared/services/rewards/types.go | 7 ++-- 2 files changed, 40 insertions(+), 8 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index e9e2a35fd..57abb8496 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -152,6 +152,15 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN r.epsilon = big.NewInt(int64(nodeCount)) } else { r.epsilon = big.NewInt(int64(minipoolCount)) + if r.networkState.IsSaturnDeployed { + // Add the number of megapool validators + for 
_, nodeInfo := range r.nodeDetails { + if nodeInfo.Megapool == nil { + continue + } + r.epsilon.Add(r.epsilon, big.NewInt(int64(nodeInfo.Megapool.ActiveValidatorCount))) + } + } } // Calculate the RPL rewards @@ -166,6 +175,8 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN return nil, fmt.Errorf("error calculating ETH rewards: %w", err) } + // Calculate the voter share distribution + // Sort and assign the maps to the ssz file lists for nodeAddress, nodeReward := range r.nodeRewards { copy(nodeReward.Address[:], nodeAddress[:]) @@ -234,6 +245,15 @@ func (r *treeGeneratorImpl_v11) approximateStakerShareOfSmoothingPool(rp Rewards r.epsilon = big.NewInt(int64(nodeCount)) } else { r.epsilon = big.NewInt(int64(minipoolCount)) + if r.networkState.IsSaturnDeployed { + // Add the number of megapool validators + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.Megapool == nil { + continue + } + r.epsilon.Add(r.epsilon, big.NewInt(int64(nodeInfo.Megapool.ActiveValidatorCount))) + } + } } // Calculate the ETH rewards @@ -451,6 +471,11 @@ func (r *treeGeneratorImpl_v11) calculateRplRewards() error { } +// Calculate the voter rewards +func (r *treeGeneratorImpl_v11) calculateVoterRewards() error { + return nil +} + // Calculates the ETH rewards for the given interval func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) error { @@ -490,12 +515,16 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) return err } eligible := 0 + megapools := 0 for _, nodeInfo := range r.nodeDetails { if nodeInfo.IsEligible { eligible++ + if nodeInfo.Megapool != nil { + megapools++ + } } } - r.log.Printlnf("%s %d / %d nodes were eligible for Smoothing Pool rewards", r.logPrefix, eligible, len(r.nodeDetails)) + r.log.Printlnf("%s %d / %d nodes (%d with megapools) were eligible for Smoothing Pool rewards", r.logPrefix, eligible, len(r.nodeDetails), megapools) // Process the attestation performance for 
each minipool during this interval r.intervalDutiesInfo = &IntervalDutiesInfo{ @@ -1378,13 +1407,15 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { if nativeNodeDetails.MegapoolDeployed { // Get the megapool details megapoolAddress := nativeNodeDetails.MegapoolAddress + nativeMegapoolDetails := r.networkState.MegapoolDetails[megapoolAddress] validators := r.networkState.MegapoolToPubkeysMap[megapoolAddress] mpInfo := &MegapoolInfo{ - Address: megapoolAddress, - Node: nodeDetails, - Validators: []*MegapoolValidatorInfo{}, - ValidatorIndexMap: make(map[string]*MegapoolValidatorInfo), + Address: megapoolAddress, + Node: nodeDetails, + Validators: []*MegapoolValidatorInfo{}, + ValidatorIndexMap: make(map[string]*MegapoolValidatorInfo), + ActiveValidatorCount: nativeMegapoolDetails.ActiveValidatorCount, } for _, validator := range validators { diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index e054cbbdc..2b5c30003 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -219,9 +219,10 @@ type MegapoolValidatorInfo struct { } type MegapoolInfo struct { - Address common.Address `json:"address"` - Node *NodeSmoothingDetails `json:"node"` - Validators []*MegapoolValidatorInfo `json:"validators"` + Address common.Address `json:"address"` + Node *NodeSmoothingDetails `json:"node"` + Validators []*MegapoolValidatorInfo `json:"validators"` + ActiveValidatorCount uint32 `json:"active_validator_count"` // Indexes over Validators slice above ValidatorIndexMap map[string]*MegapoolValidatorInfo `json:"-"` } From ab8de50c309f282c9070fcd2d5f88ff46d11145e Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 29 Jul 2025 18:20:55 -0400 Subject: [PATCH 10/33] Correct megapool scoring --- shared/services/rewards/generator-impl-v11.go | 76 ++++++++++++------- 1 file changed, 48 insertions(+), 28 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go 
b/shared/services/rewards/generator-impl-v11.go index 57abb8496..fb868759b 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -580,26 +580,38 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) effectiveNodeFee := big.NewInt(0).Add(nodeFee, nodeFeeAdder) effectiveVoterFee := big.NewInt(0).Sub(voterFee, nodeFeeAdder) - megapoolScore := big.NewInt(0).Sub(oneEth, effectiveNodeFee) // 1 - nodeFee - megapoolScore.Mul(megapoolScore, bond) // Multiply by bond - megapoolScore.Div(megapoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - megapoolScore.Add(megapoolScore, effectiveNodeFee) // Total = fee + (bond/32)(1 - fee) + // The megapool score is given by: + // (bond + effectiveNodeFee*(32-bond)) / 32 + // However, when multiplying eth values, we need to normalize the wei to eth + // So really it's (bond + (32*fee / 1E) - (32*bond / 1E)) / 32 + // If we multiply the numerator by 1 eth each, we can avoid some + // integer math inaccuracy, and when we divide by 32 it is removed. 
+ // + // (b*1 + 32f - f*b) / 32 + megapoolScore := big.NewInt(0).Mul(oneEth, bond) // b*1 + megapoolScore.Add(megapoolScore, big.NewInt(0).Mul(thirtyTwoEth, effectiveNodeFee)) // b*1 + 32f + megapoolScore.Sub(megapoolScore, big.NewInt(0).Mul(effectiveNodeFee, bond)) // b*1 + 32f - f*b + megapoolScore.Div(megapoolScore, thirtyTwoEth) // (b*1 + 32f - f*b) / 32 // Add it to the megapool's score and the total score validator.AttestationScore.Add(&validator.AttestationScore.Int, megapoolScore) r.totalAttestationScore.Add(r.totalAttestationScore, megapoolScore) - voterScore := big.NewInt(0).Sub(oneEth, effectiveVoterFee) // 1 - voterFee - voterScore.Mul(voterScore, bond) // Multiply by bond - voterScore.Div(voterScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - voterScore.Add(voterScore, effectiveVoterFee) // Total = fee + (bond/32)(1 - fee) + // Calculate the voter share + // This is simply (effectiveVoterFee * (32 - bond)) / 32 + // Simplify to (32f - f*b) / 32 + voterScore := big.NewInt(0).Mul(thirtyTwoEth, effectiveVoterFee) + voterScore.Sub(voterScore, big.NewInt(0).Mul(effectiveVoterFee, bond)) + voterScore.Div(voterScore, thirtyTwoEth) r.totalVoterScore.Add(r.totalVoterScore, voterScore) - pdaoScore := big.NewInt(0).Sub(oneEth, pdaoFee) // 1 - pdaoFee - pdaoScore.Mul(pdaoScore, bond) // Multiply by bond - pdaoScore.Div(pdaoScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - pdaoScore.Add(pdaoScore, pdaoFee) // Total = fee + (bond/32)(1 - fee) + // Calculate the pdao share + // Same formula as the voter share + pdaoScore := big.NewInt(0).Mul(thirtyTwoEth, pdaoFee) + pdaoScore.Sub(pdaoScore, big.NewInt(0).Mul(pdaoFee, bond)) + pdaoScore.Div(pdaoScore, thirtyTwoEth) r.totalPdaoScore.Add(r.totalPdaoScore, pdaoScore) + r.successfulAttestations++ } } } @@ -1090,30 +1102,38 @@ func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.Attestat effectiveNodeFee := 
big.NewInt(0).Add(nodeFee, nodeFeeAdder) effectiveVoterFee := big.NewInt(0).Sub(voterFee, nodeFeeAdder) - - // Calculate the pseudoscore for this attestation - megapoolScore := big.NewInt(0).Sub(oneEth, effectiveNodeFee) // 1 - nodeFee - megapoolScore.Mul(megapoolScore, bond) // Multiply by bond - megapoolScore.Div(megapoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - megapoolScore.Add(megapoolScore, effectiveNodeFee) // Total = fee + (bond/32)(1 - fee) + // The megapool score is given by: + // (bond + effectiveNodeFee*(32-bond)) / 32 + // However, when multiplying eth values, we need to normalize the wei to eth + // So really it's (bond + (32*fee / 1E) - (32*bond / 1E)) / 32 + // If we multiply the numerator by 1 eth each, we can avoid some + // integer math inaccuracy, and when we divide by 32 it is removed. + // + // (b*1 + 32f - f*b) / 32 + megapoolScore := big.NewInt(0).Mul(oneEth, bond) // b*1 + megapoolScore.Add(megapoolScore, big.NewInt(0).Mul(thirtyTwoEth, effectiveNodeFee)) // b*1 + 32f + megapoolScore.Sub(megapoolScore, big.NewInt(0).Mul(effectiveNodeFee, bond)) // b*1 + 32f - f*b + megapoolScore.Div(megapoolScore, thirtyTwoEth) // (b*1 + 32f - f*b) / 32 // Add it to the megapool's score and the total score validator.AttestationScore.Add(&validator.AttestationScore.Int, megapoolScore) r.totalAttestationScore.Add(r.totalAttestationScore, megapoolScore) - // Calculate the voter pseudoscore for this attestation - voterScore := big.NewInt(0).Sub(oneEth, effectiveVoterFee) // 1 - voterFee - voterScore.Mul(voterScore, bond) // Multiply by bond - voterScore.Div(voterScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - voterScore.Add(voterScore, effectiveVoterFee) // Total = fee + (bond/32)(1 - fee) + // Calculate the voter share + // This is simply (effectiveVoterFee * (32 - bond)) / 32 + // Simplify to (32f - f*b) / 32 + voterScore := big.NewInt(0).Mul(thirtyTwoEth, 
effectiveVoterFee) + voterScore.Sub(voterScore, big.NewInt(0).Mul(effectiveVoterFee, bond)) + voterScore.Div(voterScore, thirtyTwoEth) r.totalVoterScore.Add(r.totalVoterScore, voterScore) - // Calculate the pdao pseudoscore for this attestation - pdaoScore := big.NewInt(0).Sub(oneEth, pdaoFee) // 1 - pdaoFee - pdaoScore.Mul(pdaoScore, bond) // Multiply by bond - pdaoScore.Div(pdaoScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total megapool - pdaoScore.Add(pdaoScore, pdaoFee) // Total = fee + (bond/32)(1 - fee) + // Calculate the pdao share + // Same formula as the voter share + pdaoScore := big.NewInt(0).Mul(thirtyTwoEth, pdaoFee) + pdaoScore.Sub(pdaoScore, big.NewInt(0).Mul(pdaoFee, bond)) + pdaoScore.Div(pdaoScore, thirtyTwoEth) r.totalPdaoScore.Add(r.totalPdaoScore, pdaoScore) + r.successfulAttestations++ } } } From d51ecc6cea671873e0ad5d93ff281f24cf76639d Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 1 Aug 2025 14:25:25 -0400 Subject: [PATCH 11/33] Refactor calculateNodeRewards to return a struct instead of multiple ints --- shared/services/rewards/generator-impl-v11.go | 40 ++++++++++++++----- shared/services/rewards/types.go | 1 + 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index fb868759b..af4a7fea2 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -175,8 +175,6 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN return nil, fmt.Errorf("error calculating ETH rewards: %w", err) } - // Calculate the voter share distribution - // Sort and assign the maps to the ssz file lists for nodeAddress, nodeReward := range r.nodeRewards { copy(nodeReward.Address[:], nodeAddress[:]) @@ -619,12 +617,12 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) } // Determine how much ETH each node gets and 
how much the pool stakers get - poolStakerETH, nodeOpEth, bonusScalar, err := r.calculateNodeRewards() + nodeRewards, err := r.calculateNodeRewards() if err != nil { return err } if r.rewardsFile.RulesetVersion >= 10 { - r.minipoolPerformanceFile.BonusScalar = QuotedBigIntFromBigInt(bonusScalar) + r.minipoolPerformanceFile.BonusScalar = QuotedBigIntFromBigInt(nodeRewards.bonusScalar) } // Update the rewards maps @@ -686,8 +684,8 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) } // Set the totals - r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Set(poolStakerETH) - r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Set(nodeOpEth) + r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Set(nodeRewards.poolStakerEth) + r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Set(nodeRewards.nodeOpEth) r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Set(r.smoothingPoolBalance) return nil @@ -756,15 +754,29 @@ func (r *treeGeneratorImpl_v11) calculateNodeBonuses() (*big.Int, error) { return totalConsensusBonus, nil } +type nodeRewards struct { + poolStakerEth *big.Int + nodeOpEth *big.Int + pdaoEth *big.Int + voterEth *big.Int + bonusScalar *big.Int +} + // Calculate the distribution of Smoothing Pool ETH to each node -func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*big.Int, *big.Int, *big.Int, error) { +func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { var err error bonusScalar := big.NewInt(0).Set(oneEth) // If there weren't any successful attestations, everything goes to the pool stakers if r.totalAttestationScore.Cmp(common.Big0) == 0 || r.successfulAttestations == 0 { r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... 
sending the whole smoothing pool balance to the pool stakers.", r.totalAttestationScore.String(), r.successfulAttestations) - return r.smoothingPoolBalance, big.NewInt(0), bonusScalar, nil + return &nodeRewards{ + poolStakerEth: r.smoothingPoolBalance, + nodeOpEth: big.NewInt(0), + pdaoEth: big.NewInt(0), + voterEth: big.NewInt(0), + bonusScalar: bonusScalar, + }, nil } // Calculate the minipool bonuses @@ -773,7 +785,7 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*big.Int, *big.Int, *big if r.rewardsFile.RulesetVersion >= 10 && isEligibleInterval { totalConsensusBonus, err = r.calculateNodeBonuses() if err != nil { - return nil, nil, nil, err + return nil, err } } @@ -831,7 +843,7 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*big.Int, *big.Int, *big delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare) delta.Abs(delta) if delta.Cmp(r.epsilon) == 1 { - return nil, nil, nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String()) + return nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String()) } // Finally, award the bonuses @@ -855,7 +867,13 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*big.Int, *big.Int, *big r.log.Printlnf("%s (error = %s wei)", r.logPrefix, delta.String()) r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String()) - return truePoolStakerAmount, totalEthForMinipools, bonusScalar, nil + return &nodeRewards{ + poolStakerEth: truePoolStakerAmount, + nodeOpEth: totalEthForMinipools, + bonusScalar: bonusScalar, + pdaoEth: big.NewInt(0), + voterEth: big.NewInt(0), + }, nil } diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 
2b5c30003..e92d40117 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -225,6 +225,7 @@ type MegapoolInfo struct { ActiveValidatorCount uint32 `json:"active_validator_count"` // Indexes over Validators slice above ValidatorIndexMap map[string]*MegapoolValidatorInfo `json:"-"` + VoteEligibleRpl *big.Int `json:"vote_eligible_rpl"` } type MinipoolInfo struct { From ade5c847ad2fba2c346296259d12d2c771721239 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 1 Aug 2025 15:10:25 -0400 Subject: [PATCH 12/33] Add a new ssz rewards type that supports voter share --- shared/services/rewards/generator-impl-v11.go | 23 +- .../services/rewards/generator-impl-v9-v10.go | 3 - shared/services/rewards/rewards-file-v1.go | 8 + shared/services/rewards/rewards-file-v2.go | 8 + shared/services/rewards/rewards-file-v3.go | 11 + shared/services/rewards/ssz_types/encoding.go | 621 +++++++++++++++++- shared/services/rewards/ssz_types/gen.sh | 2 +- .../rewards/ssz_types/rewards-file-v4.go | 9 + .../rewards/ssz_types/rewards-file-v5.go | 555 ++++++++++++++++ shared/services/rewards/types.go | 7 + 10 files changed, 1228 insertions(+), 19 deletions(-) create mode 100644 shared/services/rewards/ssz_types/rewards-file-v5.go diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index af4a7fea2..4c52e192c 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -26,13 +26,10 @@ import ( "golang.org/x/sync/errgroup" ) -// Type assertion to ensure SSZFile_v1 is IRewardsFile -var _ IRewardsFile = (*ssz_types.SSZFile_v1)(nil) - // Implementation for tree generator ruleset v9 type treeGeneratorImpl_v11 struct { networkState *state.NetworkState - rewardsFile *ssz_types.SSZFile_v1 + rewardsFile *ssz_types.SSZFile_v2 elSnapshotHeader *types.Header snapshotEnd *SnapshotEnd log *log.ColorLogger @@ -61,7 +58,7 @@ type treeGeneratorImpl_v11 struct { genesisTime 
time.Time invalidNetworkNodes map[common.Address]uint64 minipoolPerformanceFile *MinipoolPerformanceFile_v2 - nodeRewards map[common.Address]*ssz_types.NodeReward + nodeRewards map[common.Address]*ssz_types.NodeReward_v2 networkRewards map[ssz_types.Layer]*ssz_types.NetworkReward // fields for RPIP-62 bonus calculations @@ -72,12 +69,12 @@ type treeGeneratorImpl_v11 struct { // Create a new tree generator func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) *treeGeneratorImpl_v11 { return &treeGeneratorImpl_v11{ - rewardsFile: &ssz_types.SSZFile_v1{ + rewardsFile: &ssz_types.SSZFile_v2{ RewardsFileVersion: 3, RulesetVersion: 11, Index: index, IntervalsPassed: intervalsPassed, - TotalRewards: &ssz_types.TotalRewards{ + TotalRewards: &ssz_types.TotalRewards_v2{ ProtocolDaoRpl: sszbig.NewUint256(0), TotalCollateralRpl: sszbig.NewUint256(0), TotalOracleDaoRpl: sszbig.NewUint256(0), @@ -85,9 +82,11 @@ func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint PoolStakerSmoothingPoolEth: sszbig.NewUint256(0), NodeOperatorSmoothingPoolEth: sszbig.NewUint256(0), TotalNodeWeight: sszbig.NewUint256(0), + TotalVoterShareEth: sszbig.NewUint256(0), + SmoothingPoolVoterShareEth: sszbig.NewUint256(0), }, NetworkRewards: ssz_types.NetworkRewards{}, - NodeRewards: ssz_types.NodeRewards{}, + NodeRewards: ssz_types.NodeRewards_v2{}, }, validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, minipoolValidatorIndexMap: map[string]*MinipoolInfo{}, @@ -104,7 +103,7 @@ func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint Index: index, MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, }, - nodeRewards: map[common.Address]*ssz_types.NodeReward{}, + nodeRewards: map[common.Address]*ssz_types.NodeReward_v2{}, networkRewards: 
map[ssz_types.Layer]*ssz_types.NetworkReward{}, minipoolWithdrawals: map[common.Address]*big.Int{}, } @@ -336,7 +335,7 @@ func (r *treeGeneratorImpl_v11) calculateRplRewards() error { network = 0 } - rewardsForNode = ssz_types.NewNodeReward( + rewardsForNode = ssz_types.NewNodeReward_v2( network, ssz_types.AddressFromBytes(nodeDetails.NodeAddress.Bytes()), ) @@ -425,7 +424,7 @@ func (r *treeGeneratorImpl_v11) calculateRplRewards() error { network = 0 } - rewardsForNode = ssz_types.NewNodeReward( + rewardsForNode = ssz_types.NewNodeReward_v2( network, ssz_types.AddressFromBytes(address.Bytes()), ) @@ -640,7 +639,7 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) network = 0 } - rewardsForNode = ssz_types.NewNodeReward( + rewardsForNode = ssz_types.NewNodeReward_v2( network, ssz_types.AddressFromBytes(nodeInfo.Address.Bytes()), ) diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go index bd5038990..aee400721 100644 --- a/shared/services/rewards/generator-impl-v9-v10.go +++ b/shared/services/rewards/generator-impl-v9-v10.go @@ -25,9 +25,6 @@ import ( "golang.org/x/sync/errgroup" ) -// Type assertion to ensure SSZFile_v1 is IRewardsFile -var _ IRewardsFile = (*ssz_types.SSZFile_v1)(nil) - // Implementation for tree generator ruleset v9 type treeGeneratorImpl_v9_v10 struct { networkState *state.NetworkState diff --git a/shared/services/rewards/rewards-file-v1.go b/shared/services/rewards/rewards-file-v1.go index a0487c82c..7016d85a4 100644 --- a/shared/services/rewards/rewards-file-v1.go +++ b/shared/services/rewards/rewards-file-v1.go @@ -279,6 +279,14 @@ func (f *RewardsFile_v1) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { return &nr.SmoothingPoolEth.Int } +func (f *RewardsFile_v1) GetNodeVoterShareEth(addr common.Address) *big.Int { + return big.NewInt(0) +} + +func (f *RewardsFile_v1) GetNodeEth(addr common.Address) *big.Int { + return 
f.GetNodeSmoothingPoolEth(addr) +} + // Getters for network info func (f *RewardsFile_v1) HasRewardsForNetwork(network uint64) bool { _, ok := f.NetworkRewards[network] diff --git a/shared/services/rewards/rewards-file-v2.go b/shared/services/rewards/rewards-file-v2.go index b20c06e24..d49da407c 100644 --- a/shared/services/rewards/rewards-file-v2.go +++ b/shared/services/rewards/rewards-file-v2.go @@ -289,6 +289,14 @@ func (f *RewardsFile_v2) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { return &nr.SmoothingPoolEth.Int } +func (f *RewardsFile_v2) GetNodeVoterShareEth(addr common.Address) *big.Int { + return big.NewInt(0) +} + +func (f *RewardsFile_v2) GetNodeEth(addr common.Address) *big.Int { + return f.GetNodeSmoothingPoolEth(addr) +} + // Getters for network info func (f *RewardsFile_v2) HasRewardsForNetwork(network uint64) bool { _, ok := f.NetworkRewards[network] diff --git a/shared/services/rewards/rewards-file-v3.go b/shared/services/rewards/rewards-file-v3.go index 231badc45..3b4bf8903 100644 --- a/shared/services/rewards/rewards-file-v3.go +++ b/shared/services/rewards/rewards-file-v3.go @@ -20,6 +20,9 @@ type RewardsFile_v3 struct { MinipoolPerformanceFile MinipoolPerformanceFile_v2 `json:"-"` } +// Type assertion to implement IRewardsFile +var _ IRewardsFile = (*RewardsFile_v3)(nil) + // Serialize a rewards file into bytes func (f *RewardsFile_v3) Serialize() ([]byte, error) { return json.Marshal(f) @@ -179,6 +182,14 @@ func (f *RewardsFile_v3) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { return &nr.SmoothingPoolEth.Int } +func (f *RewardsFile_v3) GetNodeVoterShareEth(addr common.Address) *big.Int { + return big.NewInt(0) +} + +func (f *RewardsFile_v3) GetNodeEth(addr common.Address) *big.Int { + return f.GetNodeSmoothingPoolEth(addr) +} + func (f *RewardsFile_v3) GetMerkleProof(addr common.Address) ([]common.Hash, error) { nr, ok := f.getNodeRewardsInfo(addr) if !ok { diff --git a/shared/services/rewards/ssz_types/encoding.go 
b/shared/services/rewards/ssz_types/encoding.go index d99385074..2a2e69d3c 100644 --- a/shared/services/rewards/ssz_types/encoding.go +++ b/shared/services/rewards/ssz_types/encoding.go @@ -1,5 +1,5 @@ // Code generated by fastssz. DO NOT EDIT. -// Hash: c302f5cab9af79d858415e7e5bc2002568baf2333120ecc30517636a1b041db6 +// Hash: d9fade4005dbe51e1b4b46856af7da0261506ec1712eab7b58cb3ddb6cd315d6 // Version: 0.1.3 package ssz_types @@ -70,7 +70,6 @@ func (s *SSZFile_v1) MarshalSSZTo(buf []byte) (dst []byte, err error) { // Offset (15) 'NodeRewards' dst = ssz.WriteOffset(dst, offset) - offset += len(s.NodeRewards) * 124 // Field (14) 'NetworkRewards' if size := len(s.NetworkRewards); size > 128 { @@ -160,7 +159,7 @@ func (s *SSZFile_v1) UnmarshalSSZ(buf []byte) error { return ssz.ErrOffset } - if o14 < 356 { + if o14 != 356 { return ssz.ErrInvalidVariableOffset } @@ -679,3 +678,619 @@ func (n *NodeReward) HashTreeRootWith(hh ssz.HashWalker) (err error) { func (n *NodeReward) GetTree() (*ssz.Node, error) { return ssz.ProofTree(n) } + +// MarshalSSZ ssz marshals the SSZFile_v2 object +func (s *SSZFile_v2) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SSZFile_v2 object to a target array +func (s *SSZFile_v2) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(420) + + // Field (0) 'Magic' + dst = append(dst, s.Magic[:]...) 
+ + // Field (1) 'RewardsFileVersion' + dst = ssz.MarshalUint64(dst, s.RewardsFileVersion) + + // Field (2) 'RulesetVersion' + dst = ssz.MarshalUint64(dst, s.RulesetVersion) + + // Field (3) 'Network' + dst = ssz.MarshalUint64(dst, uint64(s.Network)) + + // Field (4) 'Index' + dst = ssz.MarshalUint64(dst, s.Index) + + // Field (5) 'StartTime' + dst = ssz.MarshalTime(dst, s.StartTime) + + // Field (6) 'EndTime' + dst = ssz.MarshalTime(dst, s.EndTime) + + // Field (7) 'ConsensusStartBlock' + dst = ssz.MarshalUint64(dst, s.ConsensusStartBlock) + + // Field (8) 'ConsensusEndBlock' + dst = ssz.MarshalUint64(dst, s.ConsensusEndBlock) + + // Field (9) 'ExecutionStartBlock' + dst = ssz.MarshalUint64(dst, s.ExecutionStartBlock) + + // Field (10) 'ExecutionEndBlock' + dst = ssz.MarshalUint64(dst, s.ExecutionEndBlock) + + // Field (11) 'IntervalsPassed' + dst = ssz.MarshalUint64(dst, s.IntervalsPassed) + + // Field (12) 'MerkleRoot' + dst = append(dst, s.MerkleRoot[:]...) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards_v2) + } + if dst, err = s.TotalRewards.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (14) 'NetworkRewards' + dst = ssz.WriteOffset(dst, offset) + offset += len(s.NetworkRewards) * 104 + + // Offset (15) 'NodeRewards' + dst = ssz.WriteOffset(dst, offset) + + // Field (14) 'NetworkRewards' + if size := len(s.NetworkRewards); size > 128 { + err = ssz.ErrListTooBigFn("SSZFile_v2.NetworkRewards", size, 128) + return + } + for ii := 0; ii < len(s.NetworkRewards); ii++ { + if dst, err = s.NetworkRewards[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (15) 'NodeRewards' + if size := len(s.NodeRewards); size > 9223372036854775807 { + err = ssz.ErrListTooBigFn("SSZFile_v2.NodeRewards", size, 9223372036854775807) + return + } + for ii := 0; ii < len(s.NodeRewards); ii++ { + if dst, err = s.NodeRewards[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + return +} + +// UnmarshalSSZ ssz 
unmarshals the SSZFile_v2 object +func (s *SSZFile_v2) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 420 { + return ssz.ErrSize + } + + tail := buf + var o14, o15 uint64 + + // Field (0) 'Magic' + copy(s.Magic[:], buf[0:4]) + + // Field (1) 'RewardsFileVersion' + s.RewardsFileVersion = ssz.UnmarshallUint64(buf[4:12]) + + // Field (2) 'RulesetVersion' + s.RulesetVersion = ssz.UnmarshallUint64(buf[12:20]) + + // Field (3) 'Network' + s.Network = Network(ssz.UnmarshallUint64(buf[20:28])) + + // Field (4) 'Index' + s.Index = ssz.UnmarshallUint64(buf[28:36]) + + // Field (5) 'StartTime' + s.StartTime = ssz.UnmarshalTime(buf[36:44]) + + // Field (6) 'EndTime' + s.EndTime = ssz.UnmarshalTime(buf[44:52]) + + // Field (7) 'ConsensusStartBlock' + s.ConsensusStartBlock = ssz.UnmarshallUint64(buf[52:60]) + + // Field (8) 'ConsensusEndBlock' + s.ConsensusEndBlock = ssz.UnmarshallUint64(buf[60:68]) + + // Field (9) 'ExecutionStartBlock' + s.ExecutionStartBlock = ssz.UnmarshallUint64(buf[68:76]) + + // Field (10) 'ExecutionEndBlock' + s.ExecutionEndBlock = ssz.UnmarshallUint64(buf[76:84]) + + // Field (11) 'IntervalsPassed' + s.IntervalsPassed = ssz.UnmarshallUint64(buf[84:92]) + + // Field (12) 'MerkleRoot' + copy(s.MerkleRoot[:], buf[92:124]) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards_v2) + } + if err = s.TotalRewards.UnmarshalSSZ(buf[124:412]); err != nil { + return err + } + + // Offset (14) 'NetworkRewards' + if o14 = ssz.ReadOffset(buf[412:416]); o14 > size { + return ssz.ErrOffset + } + + if o14 != 420 { + return ssz.ErrInvalidVariableOffset + } + + // Offset (15) 'NodeRewards' + if o15 = ssz.ReadOffset(buf[416:420]); o15 > size || o14 > o15 { + return ssz.ErrOffset + } + + // Field (14) 'NetworkRewards' + { + buf = tail[o14:o15] + num, err := ssz.DivideInt2(len(buf), 104, 128) + if err != nil { + return err + } + s.NetworkRewards = make([]*NetworkReward, num) + for ii := 0; ii 
< num; ii++ { + if s.NetworkRewards[ii] == nil { + s.NetworkRewards[ii] = new(NetworkReward) + } + if err = s.NetworkRewards[ii].UnmarshalSSZ(buf[ii*104 : (ii+1)*104]); err != nil { + return err + } + } + } + + // Field (15) 'NodeRewards' + { + buf = tail[o15:] + num, err := ssz.DivideInt2(len(buf), 156, 9223372036854775807) + if err != nil { + return err + } + s.NodeRewards = make([]*NodeReward_v2, num) + for ii := 0; ii < num; ii++ { + if s.NodeRewards[ii] == nil { + s.NodeRewards[ii] = new(NodeReward_v2) + } + if err = s.NodeRewards[ii].UnmarshalSSZ(buf[ii*156 : (ii+1)*156]); err != nil { + return err + } + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SSZFile_v2 object +func (s *SSZFile_v2) SizeSSZ() (size int) { + size = 420 + + // Field (14) 'NetworkRewards' + size += len(s.NetworkRewards) * 104 + + // Field (15) 'NodeRewards' + size += len(s.NodeRewards) * 156 + + return +} + +// HashTreeRoot ssz hashes the SSZFile_v2 object +func (s *SSZFile_v2) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SSZFile_v2 object with a hasher +func (s *SSZFile_v2) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Magic' + hh.PutBytes(s.Magic[:]) + + // Field (1) 'RewardsFileVersion' + hh.PutUint64(s.RewardsFileVersion) + + // Field (2) 'RulesetVersion' + hh.PutUint64(s.RulesetVersion) + + // Field (3) 'Network' + hh.PutUint64(uint64(s.Network)) + + // Field (4) 'Index' + hh.PutUint64(s.Index) + + // Field (5) 'StartTime' + hh.PutUint64(uint64(s.StartTime.Unix())) + + // Field (6) 'EndTime' + hh.PutUint64(uint64(s.EndTime.Unix())) + + // Field (7) 'ConsensusStartBlock' + hh.PutUint64(s.ConsensusStartBlock) + + // Field (8) 'ConsensusEndBlock' + hh.PutUint64(s.ConsensusEndBlock) + + // Field (9) 'ExecutionStartBlock' + hh.PutUint64(s.ExecutionStartBlock) + + // Field (10) 'ExecutionEndBlock' + hh.PutUint64(s.ExecutionEndBlock) + + // Field 
(11) 'IntervalsPassed' + hh.PutUint64(s.IntervalsPassed) + + // Field (12) 'MerkleRoot' + hh.PutBytes(s.MerkleRoot[:]) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards_v2) + } + if err = s.TotalRewards.HashTreeRootWith(hh); err != nil { + return + } + + // Field (14) 'NetworkRewards' + { + subIndx := hh.Index() + num := uint64(len(s.NetworkRewards)) + if num > 128 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range s.NetworkRewards { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 128) + } + + // Field (15) 'NodeRewards' + { + subIndx := hh.Index() + num := uint64(len(s.NodeRewards)) + if num > 9223372036854775807 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range s.NodeRewards { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 9223372036854775807) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SSZFile_v2 object +func (s *SSZFile_v2) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the TotalRewards_v2 object +func (t *TotalRewards_v2) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(t) +} + +// MarshalSSZTo ssz marshals the TotalRewards_v2 object to a target array +func (t *TotalRewards_v2) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'ProtocolDaoRpl' + if dst, err = t.ProtocolDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'TotalCollateralRpl' + if dst, err = t.TotalCollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'TotalOracleDaoRpl' + if dst, err = t.TotalOracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'TotalSmoothingPoolEth' + if dst, err = t.TotalSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if dst, err = 
t.PoolStakerSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if dst, err = t.NodeOperatorSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (6) 'TotalNodeWeight' + if dst, err = t.TotalNodeWeight.MarshalSSZTo(dst); err != nil { + return + } + + // Field (7) 'TotalVoterShareEth' + if dst, err = t.TotalVoterShareEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (8) 'SmoothingPoolVoterShareEth' + if dst, err = t.SmoothingPoolVoterShareEth.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the TotalRewards_v2 object +func (t *TotalRewards_v2) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 288 { + return ssz.ErrSize + } + + // Field (0) 'ProtocolDaoRpl' + if err = t.ProtocolDaoRpl.UnmarshalSSZ(buf[0:32]); err != nil { + return err + } + + // Field (1) 'TotalCollateralRpl' + if err = t.TotalCollateralRpl.UnmarshalSSZ(buf[32:64]); err != nil { + return err + } + + // Field (2) 'TotalOracleDaoRpl' + if err = t.TotalOracleDaoRpl.UnmarshalSSZ(buf[64:96]); err != nil { + return err + } + + // Field (3) 'TotalSmoothingPoolEth' + if err = t.TotalSmoothingPoolEth.UnmarshalSSZ(buf[96:128]); err != nil { + return err + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if err = t.PoolStakerSmoothingPoolEth.UnmarshalSSZ(buf[128:160]); err != nil { + return err + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if err = t.NodeOperatorSmoothingPoolEth.UnmarshalSSZ(buf[160:192]); err != nil { + return err + } + + // Field (6) 'TotalNodeWeight' + if err = t.TotalNodeWeight.UnmarshalSSZ(buf[192:224]); err != nil { + return err + } + + // Field (7) 'TotalVoterShareEth' + if err = t.TotalVoterShareEth.UnmarshalSSZ(buf[224:256]); err != nil { + return err + } + + // Field (8) 'SmoothingPoolVoterShareEth' + if err = t.SmoothingPoolVoterShareEth.UnmarshalSSZ(buf[256:288]); err != nil { + return err + } + + 
return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the TotalRewards_v2 object +func (t *TotalRewards_v2) SizeSSZ() (size int) { + size = 288 + return +} + +// HashTreeRoot ssz hashes the TotalRewards_v2 object +func (t *TotalRewards_v2) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(t) +} + +// HashTreeRootWith ssz hashes the TotalRewards_v2 object with a hasher +func (t *TotalRewards_v2) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'ProtocolDaoRpl' + if err = t.ProtocolDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'TotalCollateralRpl' + if err = t.TotalCollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'TotalOracleDaoRpl' + if err = t.TotalOracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'TotalSmoothingPoolEth' + if err = t.TotalSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if err = t.PoolStakerSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if err = t.NodeOperatorSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (6) 'TotalNodeWeight' + if err = t.TotalNodeWeight.HashTreeRootWith(hh); err != nil { + return + } + + // Field (7) 'TotalVoterShareEth' + if err = t.TotalVoterShareEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (8) 'SmoothingPoolVoterShareEth' + if err = t.SmoothingPoolVoterShareEth.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the TotalRewards_v2 object +func (t *TotalRewards_v2) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(t) +} + +// MarshalSSZ ssz marshals the NodeReward_v2 object +func (n *NodeReward_v2) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(n) +} + +// MarshalSSZTo ssz marshals the NodeReward_v2 object to a target array 
+func (n *NodeReward_v2) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Address' + dst = append(dst, n.Address[:]...) + + // Field (1) 'Network' + dst = ssz.MarshalUint64(dst, uint64(n.Network)) + + // Field (2) 'CollateralRpl' + if dst, err = n.CollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'OracleDaoRpl' + if dst, err = n.OracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'SmoothingPoolEth' + if dst, err = n.SmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (5) 'VoterShareEth' + if dst, err = n.VoterShareEth.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the NodeReward_v2 object +func (n *NodeReward_v2) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 156 { + return ssz.ErrSize + } + + // Field (0) 'Address' + copy(n.Address[:], buf[0:20]) + + // Field (1) 'Network' + n.Network = Layer(ssz.UnmarshallUint64(buf[20:28])) + + // Field (2) 'CollateralRpl' + if err = n.CollateralRpl.UnmarshalSSZ(buf[28:60]); err != nil { + return err + } + + // Field (3) 'OracleDaoRpl' + if err = n.OracleDaoRpl.UnmarshalSSZ(buf[60:92]); err != nil { + return err + } + + // Field (4) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.UnmarshalSSZ(buf[92:124]); err != nil { + return err + } + + // Field (5) 'VoterShareEth' + if err = n.VoterShareEth.UnmarshalSSZ(buf[124:156]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the NodeReward_v2 object +func (n *NodeReward_v2) SizeSSZ() (size int) { + size = 156 + return +} + +// HashTreeRoot ssz hashes the NodeReward_v2 object +func (n *NodeReward_v2) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(n) +} + +// HashTreeRootWith ssz hashes the NodeReward_v2 object with a hasher +func (n *NodeReward_v2) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() 
+ + // Field (0) 'Address' + hh.PutBytes(n.Address[:]) + + // Field (1) 'Network' + hh.PutUint64(uint64(n.Network)) + + // Field (2) 'CollateralRpl' + if err = n.CollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'OracleDaoRpl' + if err = n.OracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (5) 'VoterShareEth' + if err = n.VoterShareEth.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the NodeReward_v2 object +func (n *NodeReward_v2) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(n) +} diff --git a/shared/services/rewards/ssz_types/gen.sh b/shared/services/rewards/ssz_types/gen.sh index 0e575bea7..fbaf0c44f 100755 --- a/shared/services/rewards/ssz_types/gen.sh +++ b/shared/services/rewards/ssz_types/gen.sh @@ -1,3 +1,3 @@ #!/bin/bash rm -fr encoding.go -sszgen --path . -objs SSZFile_v1 -output encoding.go -include big/ +sszgen --path . 
-objs SSZFile_v1,SSZFile_v2 -output encoding.go -include big/ diff --git a/shared/services/rewards/ssz_types/rewards-file-v4.go b/shared/services/rewards/ssz_types/rewards-file-v4.go index 4a551e9f2..19d19d51e 100644 --- a/shared/services/rewards/ssz_types/rewards-file-v4.go +++ b/shared/services/rewards/ssz_types/rewards-file-v4.go @@ -317,6 +317,7 @@ type NetworkReward struct { // Amount of RPL sent to the network for oDAO members OracleDaoRpl big.Uint256 `ssz-size:"32" json:"oracleDaoRpl"` // Amount of Eth sent to the network for Node Operators + // In v11+ this includes the voter share SmoothingPoolEth big.Uint256 `ssz-size:"32" json:"smoothingPoolEth"` } @@ -562,6 +563,14 @@ func (f *SSZFile_v1) GetNodeSmoothingPoolEth(addr common.Address) *stdbig.Int { return nr.SmoothingPoolEth.Int } +func (f *SSZFile_v1) GetNodeVoterShareEth(addr common.Address) *stdbig.Int { + return stdbig.NewInt(0) +} + +func (f *SSZFile_v1) GetNodeEth(addr common.Address) *stdbig.Int { + return f.GetNodeSmoothingPoolEth(addr) +} + func (f *SSZFile_v1) GetRewardsFileVersion() uint64 { return f.RewardsFileVersion } diff --git a/shared/services/rewards/ssz_types/rewards-file-v5.go b/shared/services/rewards/ssz_types/rewards-file-v5.go new file mode 100644 index 000000000..a3547001c --- /dev/null +++ b/shared/services/rewards/ssz_types/rewards-file-v5.go @@ -0,0 +1,555 @@ +package ssz_types + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + stdbig "math/big" + "slices" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" + "github.com/wealdtech/go-merkletree" + "github.com/wealdtech/go-merkletree/keccak256" +) + +type NodeRewards_v2 []*NodeReward_v2 + +type SSZFile_v2 struct { + // Fields specific to ssz encoding are first + + // A magic header. Four bytes. Helps immediately verify what follows is a rewards tree. 
+ // 0x52505254 - it's RPRT in ASCII and easy to recognize + Magic [4]byte `ssz-size:"4" json:"-"` + // Version is first- parsers can check the first 12 bytes of the file to make sure they're + // parsing a rewards tree and it is a version they know how to parse. + RewardsFileVersion uint64 `json:"rewardsFileVersion"` + + // Next, we need fields for the rest of the RewardsFileHeader + + // RulesetVersion is the version of the ruleset used to generate the tree, e.g., v9 for the first + // ruleset to use ssz + RulesetVersion uint64 `json:"rulesetVersion"` + // Network is the chain id for which the tree is generated + Network Network `json:"network"` + // Index is the rewards interval index + Index uint64 `json:"index"` + // StartTime is the time of the first slot of the interval + StartTime time.Time `json:"startTime"` + // EndTime is the time fo the last slot of the interval + EndTime time.Time `json:"endTime"` + // ConsensusStartBlock is the first non-empty slot of the interval + ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` + // ConsensusEndBlock is the last non-empty slot of the interval + ConsensusEndBlock uint64 `json:"consensusEndBlock"` + // ExecutionStartBlock is the execution block number included in ConsensusStartBlock + ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` + // ExecutionEndBlock is the execution block number included in ConsensusEndBlock + ExecutionEndBlock uint64 `json:"executionEndBlock"` + // IntervalsPassed is the number of rewards intervals contained in this tree + IntervalsPassed uint64 `json:"intervalsPassed"` + // MerkleRoot is the root of the merkle tree of all the nodes in this tree. + MerkleRoot Hash `ssz-size:"32" json:"merkleRoot,omitempty"` + // TotalRewards is aggregate data on how many rewards this tree contains + TotalRewards *TotalRewards_v2 `json:"totalRewards"` + // NetworkRewards is the destinations and aggregate amounts for each network + // this tree distributes to. 
+ // Must be sorted by Chain ID ascending + NetworkRewards NetworkRewards `ssz-max:"128" json:"networkRewards"` + + // Finally, the actual per-node objects that get merkle-ized + + // NodeRewards are the objects that make up the merkle tree. + // Must be sorted by Node Address ascending + NodeRewards NodeRewards_v2 `ssz-max:"9223372036854775807" json:"nodeRewards"` + + merkleProofs map[Address]MerkleProof `ssz:"-" json:"-"` +} + +func NewSSZFile_v2() *SSZFile_v2 { + return &SSZFile_v2{ + Magic: Magic, + } +} + +// Check if the NodeRewards field respects unique constraints +func (f *SSZFile_v2) nodeRewardsUnique() bool { + m := make(map[Address]any, len(f.NodeRewards)) + + for _, nr := range f.NodeRewards { + _, found := m[nr.Address] + if found { + return false + } + m[nr.Address] = struct{}{} + } + + return true +} + +// Check if the NetworkRewards field respects unique constraints +func (f *SSZFile_v2) networkRewardsUnique() bool { + m := make(map[uint64]any, len(f.NetworkRewards)) + + for _, nr := range f.NetworkRewards { + _, found := m[nr.Network] + if found { + return false + } + m[nr.Network] = struct{}{} + } + + return true +} + +// Verify checks that the arrays in the file are appropriately sorted and that +// the merkle proof, if present, matches. 
+func (f *SSZFile_v2) Verify() error { + if !sort.IsSorted(f.NodeRewards) { + return errors.New("ssz file node rewards out of order") + } + + if !sort.IsSorted(f.NetworkRewards) { + return errors.New("ssz file network rewards out of order") + } + + if !f.nodeRewardsUnique() { + return errors.New("ssz file has duplicate entries in its NodeRewards field") + } + + if !f.networkRewardsUnique() { + return errors.New("ssz file has duplicate entries in its NetworkRewards field") + } + + if f.TotalRewards == nil { + return errors.New("missing required field TotalRewards") + } + + if _, err := f.Proofs(); err != nil { + return err + } + + return nil +} + +// Minipool Performance CID is deprecated, but we must implement this for the interface +func (f *SSZFile_v2) SetMinipoolPerformanceFileCID(cid string) { +} + +// The "normal" serialize() call is expected to be JSON by ISerializable in files.go +func (f *SSZFile_v2) Serialize() ([]byte, error) { + return json.Marshal(f) +} + +// Write as SSZ +func (f *SSZFile_v2) SerializeSSZ() ([]byte, error) { + return f.FinalizeSSZ() +} + +func (f *SSZFile_v2) GenerateMerkleTree() error { + _, err := f.Proofs() + return err +} + +// Marshal wrappers that adds the magic header if absent and sets or validators merkle root +func (f *SSZFile_v2) FinalizeSSZ() ([]byte, error) { + + return f.FinalizeSSZTo(make([]byte, 0, f.SizeSSZ())) +} + +func (f *SSZFile_v2) FinalizeSSZTo(buf []byte) ([]byte, error) { + copy(f.Magic[:], Magic[:]) + if err := f.Verify(); err != nil { + return nil, err + } + + return f.MarshalSSZTo(buf) +} + +// Parsing wrapper that adds verification to the merkle root and magic header +func ParseSSZFile_v2(buf []byte) (*SSZFile_v2, error) { + if !bytes.HasPrefix(buf, Magic[:]) { + return nil, errors.New("magic header not found in reward ssz file") + } + + f := &SSZFile_v2{} + if err := f.UnmarshalSSZ(buf); err != nil { + return nil, err + } + + if err := f.Verify(); err != nil { + return nil, err + } + + return f, nil +} + 
+// This getter lazy-computes the proofs and caches them on the file +func (f *SSZFile_v2) Proofs() (map[Address]MerkleProof, error) { + if f.merkleProofs != nil { + return f.merkleProofs, nil + } + + sort.Sort(f.NodeRewards) + sort.Sort(f.NetworkRewards) + + nodeDataMap := make(map[Address][]byte, len(f.NodeRewards)) + treeData := make([][]byte, 0, len(f.NodeRewards)) + for _, nr := range f.NodeRewards { + // 20 bytes for address, 32 each for network/rpl/eth + address := nr.Address + network := uint256.NewInt(nr.Network).Bytes32() + rpl := stdbig.NewInt(0) + rpl.Add(rpl, nr.CollateralRpl.Int) + rpl.Add(rpl, nr.OracleDaoRpl.Int) + rplBytes := make([]byte, 32) + rplBytes = rpl.FillBytes(rplBytes) + + eth := stdbig.NewInt(0) + eth.Add(eth, nr.SmoothingPoolEth.Int) + eth.Add(eth, nr.VoterShareEth.Int) + ethBytes := make([]byte, 32) + ethBytes = eth.FillBytes(ethBytes) + + const dataSize = 20 + 32*3 + nodeData := make([]byte, dataSize) + copy(nodeData[0:20], address[:]) + copy(nodeData[20:20+32], network[:]) + copy(nodeData[20+32:20+32*2], rplBytes[:]) + copy(nodeData[20+32*2:20+32*3], ethBytes[:]) + + treeData = append(treeData, nodeData) + nodeDataMap[nr.Address] = nodeData + } + + tree, err := merkletree.NewUsing(treeData, keccak256.New(), false, true) + if err != nil { + return nil, fmt.Errorf("error generating Merkle Tree: %w", err) + } + + // Generate the proofs + out := make(map[Address]MerkleProof) + f.merkleProofs = out + for address, nodeData := range nodeDataMap { + proof, err := tree.GenerateProof(nodeData, 0) + if err != nil { + return nil, fmt.Errorf("error generating proof for node 0x%s: %w", hex.EncodeToString(address[:]), err) + } + + // Store the proof in the result map + out[address] = make([]Hash, len(proof.Hashes)) + for i, hash := range proof.Hashes { + out[address][i] = Hash{} + copy(out[address][i][:], hash) + } + } + + // Populate missing proofs at node level + for _, nr := range f.NodeRewards { + if nr.MerkleProof == nil { + nr.MerkleProof = 
out[nr.Address] + } + } + + // Finally, set the root. If it's already set, and differs, return an error. + root := Hash{} + copy(root[:], tree.Root()) + if bytes.Count(f.MerkleRoot[:], []byte{0x00}) >= 32 { + f.MerkleRoot = root + return out, nil + } + + if !bytes.Equal(f.MerkleRoot[:], root[:]) { + return nil, fmt.Errorf("generated root %s mismatch against existing root %s", root, f.MerkleRoot) + } + + // The existing root matches the calculated root + return out, nil +} + +type TotalRewards_v2 struct { + // Total amount of RPL sent to the pDAO + ProtocolDaoRpl big.Uint256 `ssz-size:"32" json:"protocolDaoRpl"` + // Total amount of RPL sent to Node Operators + TotalCollateralRpl big.Uint256 `ssz-size:"32" json:"totalCollateralRpl"` + // Total amount of RPL sent to the oDAO + TotalOracleDaoRpl big.Uint256 `ssz-size:"32" json:"totalOracleDaoRpl"` + // Total amount of ETH in the Smoothing Pool + TotalSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"totalSmoothingPoolEth"` + // Total amount of Eth sent to the rETH contract + PoolStakerSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"poolStakerSmoothingPoolEth"` + // Total amount of Eth sent to Node Operators in the Smoothing Pool + NodeOperatorSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"nodeOperatorSmoothingPoolEth"` + // Total Node Weight as defined by RPIP-30 + TotalNodeWeight big.Uint256 `ssz-size:"32" json:"totalNodeWeight,omitempty"` + // Total Voter Share as defined by RPIP-46 + TotalVoterShareEth big.Uint256 `ssz-size:"32" json:"totalVoterShareEth"` + // Smoothing Pool Voter Share is the portion of TotalVoterShareEth that comes from + // the Smoothing Pool contract (as opposed to megapool distribution) + SmoothingPoolVoterShareEth big.Uint256 `ssz-size:"32" json:"smoothingPoolVoterShareEth"` +} + +type NodeReward_v2 struct { + // Address of the Node (key) + Address Address `ssz-size:"20" json:"-"` + + // Chain ID on which the Node will claim + Network Layer `json:"rewardNetwork"` + // Amount of staking 
RPL earned by the Node + CollateralRpl big.Uint256 `ssz-size:"32" json:"collateralRpl"` + // Amount of oDAO RPL earned by the Node + OracleDaoRpl big.Uint256 `ssz-size:"32" json:"oracleDaoRpl"` + // Amount of Smoothing Pool ETH earned by the Node + SmoothingPoolEth big.Uint256 `ssz-size:"32" json:"smoothingPoolEth"` + // Amount of Voter Share ETH earned by the Node + VoterShareEth big.Uint256 `ssz-size:"32" json:"voterShareEth"` + // Merkle proof for the node claim, sorted with the Merkle root last + MerkleProof MerkleProof `ssz:"-" json:"merkleProof"` +} + +func NewNodeReward_v2(network Layer, address Address) *NodeReward_v2 { + return &NodeReward_v2{ + Address: address, + Network: network, + CollateralRpl: big.NewUint256(0), + OracleDaoRpl: big.NewUint256(0), + SmoothingPoolEth: big.NewUint256(0), + VoterShareEth: big.NewUint256(0), + } +} + +// NodeRewards should implement sort.Interface to make it easier to sort. +func (n NodeRewards_v2) Len() int { + return len(n) +} + +func (n NodeRewards_v2) Less(i, j int) bool { + ia := n[i].Address + ja := n[j].Address + + if bytes.Compare(ia[:], ja[:]) < 0 { + return true + } + + return false +} + +func (n NodeRewards_v2) Swap(i, j int) { + tmp := n[i] + n[i] = n[j] + n[j] = tmp +} + +func (n NodeRewards_v2) Find(addr Address) *NodeReward_v2 { + idx := slices.IndexFunc(n, func(nr *NodeReward_v2) bool { + return bytes.Equal(nr.Address[:], addr[:]) + }) + if idx == -1 { + return nil + } + return n[idx] +} + +// Functions to implement IRewardsFile +func (f *SSZFile_v2) Deserialize(data []byte) error { + if bytes.HasPrefix(data, Magic[:]) { + if err := f.UnmarshalSSZ(data); err != nil { + return err + } + + return f.Verify() + } + + return json.Unmarshal(data, f) +} + +func (f *SSZFile_v2) GetIndex() uint64 { + return f.Index +} + +func (f *SSZFile_v2) GetMerkleRoot() string { + return f.MerkleRoot.String() +} + +func (f *SSZFile_v2) GetNodeAddresses() []common.Address { + out := make([]common.Address, 0, len(f.NodeRewards)) 
+ + for _, nr := range f.NodeRewards { + out = append(out, common.BytesToAddress(nr.Address[:])) + } + return out +} + +func (f *SSZFile_v2) GetConsensusStartBlock() uint64 { + return f.ConsensusStartBlock +} + +func (f *SSZFile_v2) GetExecutionStartBlock() uint64 { + return f.ExecutionStartBlock +} + +func (f *SSZFile_v2) GetConsensusEndBlock() uint64 { + return f.ConsensusEndBlock +} + +func (f *SSZFile_v2) GetExecutionEndBlock() uint64 { + return f.ExecutionEndBlock +} + +func (f *SSZFile_v2) GetStartTime() time.Time { + return f.StartTime +} + +func (f *SSZFile_v2) GetEndTime() time.Time { + return f.EndTime +} + +func (f *SSZFile_v2) GetIntervalsPassed() uint64 { + return f.IntervalsPassed +} + +func (f *SSZFile_v2) GetMerkleProof(address common.Address) ([]common.Hash, error) { + proofs, err := f.Proofs() + if err != nil { + return nil, fmt.Errorf("error while calculating proof for %s: %w", address.String(), err) + } + + var nativeAddress Address + copy(nativeAddress[:], address[:]) + nativeProofs := proofs[nativeAddress] + out := make([]common.Hash, 0, len(nativeProofs)) + for _, p := range nativeProofs { + var h common.Hash + copy(h[:], p[:]) + out = append(out, h) + } + + return out, nil +} + +func (f *SSZFile_v2) getRewardsForNetwork(network uint64) *NetworkReward { + for _, nr := range f.NetworkRewards { + if nr.Network == network { + return nr + } + } + return nil +} + +func (f *SSZFile_v2) HasRewardsForNetwork(network uint64) bool { + return f.getRewardsForNetwork(network) != nil +} + +func (f *SSZFile_v2) GetNetworkCollateralRpl(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.CollateralRpl.Int +} + +func (f *SSZFile_v2) GetNetworkOracleDaoRpl(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.OracleDaoRpl.Int +} + +func (f *SSZFile_v2) GetNetworkSmoothingPoolEth(network uint64) *stdbig.Int { 
+ nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.SmoothingPoolEth.Int +} + +func (f *SSZFile_v2) getNodeRewards(addr common.Address) *NodeReward_v2 { + var nativeAddress Address + copy(nativeAddress[:], addr[:]) + return f.NodeRewards.Find(nativeAddress) +} + +func (f *SSZFile_v2) HasRewardsFor(addr common.Address) bool { + return f.getNodeRewards(addr) != nil +} + +func (f *SSZFile_v2) GetNodeCollateralRpl(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.CollateralRpl.Int +} + +func (f *SSZFile_v2) GetNodeOracleDaoRpl(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.OracleDaoRpl.Int +} + +func (f *SSZFile_v2) GetNodeSmoothingPoolEth(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.SmoothingPoolEth.Int +} + +func (f *SSZFile_v2) GetNodeVoterShareEth(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.VoterShareEth.Int +} + +func (f *SSZFile_v2) GetNodeEth(addr common.Address) *stdbig.Int { + out := stdbig.NewInt(0) + out.Add(out, f.GetNodeSmoothingPoolEth(addr)) + out.Add(out, f.GetNodeVoterShareEth(addr)) + + return out +} + +func (f *SSZFile_v2) GetRewardsFileVersion() uint64 { + return f.RewardsFileVersion +} + +func (f *SSZFile_v2) GetTotalCollateralRpl() *stdbig.Int { + return f.TotalRewards.TotalCollateralRpl.Int +} + +func (f *SSZFile_v2) GetTotalNodeOperatorSmoothingPoolEth() *stdbig.Int { + return f.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +func (f *SSZFile_v2) GetTotalNodeWeight() *stdbig.Int { + return f.TotalRewards.TotalNodeWeight.Int +} + +func (f *SSZFile_v2) GetTotalOracleDaoRpl() *stdbig.Int { + return f.TotalRewards.TotalOracleDaoRpl.Int +} + +func (f *SSZFile_v2) 
GetTotalPoolStakerSmoothingPoolEth() *stdbig.Int { + return f.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +func (f *SSZFile_v2) GetTotalProtocolDaoRpl() *stdbig.Int { + return f.TotalRewards.ProtocolDaoRpl.Int +} diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index e92d40117..c651f919a 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -14,6 +14,7 @@ import ( "github.com/rocket-pool/smartnode/bindings/rewards" "github.com/rocket-pool/smartnode/bindings/types" "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types" "github.com/wealdtech/go-merkletree" ) @@ -111,6 +112,8 @@ type IRewardsFile interface { GetNodeCollateralRpl(common.Address) *big.Int GetNodeOracleDaoRpl(common.Address) *big.Int GetNodeSmoothingPoolEth(common.Address) *big.Int + GetNodeVoterShareEth(common.Address) *big.Int + GetNodeEth(common.Address) *big.Int GetMerkleProof(common.Address) ([]common.Hash, error) // Getters for network info @@ -126,6 +129,10 @@ type IRewardsFile interface { GenerateMerkleTree() error } +// Type assertions for ssz rewards files +var _ IRewardsFile = (*ssz_types.SSZFile_v1)(nil) +var _ IRewardsFile = (*ssz_types.SSZFile_v2)(nil) + // Rewards per network type NetworkRewardsInfo struct { CollateralRpl *QuotedBigInt `json:"collateralRpl"` From 4c8610f3fbeb3017c89b3b0b35fe5198f97f2bf9 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 1 Aug 2025 15:16:32 -0400 Subject: [PATCH 13/33] Make commands aware of voter share vs smoothing pool eth --- rocketpool-cli/node/claim-rewards.go | 5 ++++- rocketpool/api/node/rewards.go | 4 ++-- rocketpool/node/collectors/node-collector.go | 4 ++-- shared/services/rewards/types.go | 2 ++ shared/services/rewards/utils.go | 2 ++ 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/rocketpool-cli/node/claim-rewards.go b/rocketpool-cli/node/claim-rewards.go index 515c9afd4..839e160e5 
100644 --- a/rocketpool-cli/node/claim-rewards.go +++ b/rocketpool-cli/node/claim-rewards.go @@ -117,10 +117,12 @@ func nodeClaimRewards(c *cli.Context) error { fmt.Printf("\tOracle DAO: %.6f RPL\n", eth.WeiToEth(&intervalInfo.ODaoRplAmount.Int)) } fmt.Printf("\tSmoothing Pool: %.6f ETH\n\n", eth.WeiToEth(&intervalInfo.SmoothingPoolEthAmount.Int)) + fmt.Printf("\tVoter Share: %.6f ETH\n", eth.WeiToEth(&intervalInfo.VoterShareEth.Int)) + fmt.Printf("\tTotal: %.6f ETH\n\n", eth.WeiToEth(&intervalInfo.TotalEthAmount.Int)) totalRpl.Add(totalRpl, &intervalInfo.CollateralRplAmount.Int) totalRpl.Add(totalRpl, &intervalInfo.ODaoRplAmount.Int) - totalEth.Add(totalEth, &intervalInfo.SmoothingPoolEthAmount.Int) + totalEth.Add(totalEth, &intervalInfo.TotalEthAmount.Int) } fmt.Println("Total Pending Rewards:") @@ -186,6 +188,7 @@ func nodeClaimRewards(c *cli.Context) error { claimRpl.Add(claimRpl, &intervalInfo.CollateralRplAmount.Int) claimRpl.Add(claimRpl, &intervalInfo.ODaoRplAmount.Int) claimEth.Add(claimEth, &intervalInfo.SmoothingPoolEthAmount.Int) + claimEth.Add(claimEth, &intervalInfo.VoterShareEth.Int) } } } diff --git a/rocketpool/api/node/rewards.go b/rocketpool/api/node/rewards.go index bea75c858..6eee7709c 100644 --- a/rocketpool/api/node/rewards.go +++ b/rocketpool/api/node/rewards.go @@ -139,7 +139,7 @@ func getRewards(c *cli.Context) (*api.NodeRewardsResponse, error) { return fmt.Errorf("Error calculating lifetime node rewards: rewards file %s doesn't exist but interval %d was claimed", intervalInfo.TreeFilePath, claimedInterval) } rplRewards.Add(rplRewards, &intervalInfo.CollateralRplAmount.Int) - ethRewards.Add(ethRewards, &intervalInfo.SmoothingPoolEthAmount.Int) + ethRewards.Add(ethRewards, &intervalInfo.TotalEthAmount.Int) } // Get the unclaimed rewards @@ -153,7 +153,7 @@ func getRewards(c *cli.Context) (*api.NodeRewardsResponse, error) { } if intervalInfo.NodeExists { unclaimedRplRewardsWei.Add(unclaimedRplRewardsWei, 
&intervalInfo.CollateralRplAmount.Int) - unclaimedEthRewardsWei.Add(unclaimedEthRewardsWei, &intervalInfo.SmoothingPoolEthAmount.Int) + unclaimedEthRewardsWei.Add(unclaimedEthRewardsWei, &intervalInfo.TotalEthAmount.Int) } } diff --git a/rocketpool/node/collectors/node-collector.go b/rocketpool/node/collectors/node-collector.go index 6286b4b77..778298d95 100644 --- a/rocketpool/node/collectors/node-collector.go +++ b/rocketpool/node/collectors/node-collector.go @@ -335,7 +335,7 @@ func (collector *NodeCollector) Collect(channel chan<- prometheus.Metric) { } newRewards.Add(newRewards, &intervalInfo.CollateralRplAmount.Int) - newClaimedEthRewards.Add(newClaimedEthRewards, &intervalInfo.SmoothingPoolEthAmount.Int) + newClaimedEthRewards.Add(newClaimedEthRewards, &intervalInfo.TotalEthAmount.Int) collector.handledIntervals[claimedInterval] = true } } @@ -350,7 +350,7 @@ func (collector *NodeCollector) Collect(channel chan<- prometheus.Metric) { } if intervalInfo.NodeExists { unclaimedRplWei.Add(unclaimedRplWei, &intervalInfo.CollateralRplAmount.Int) - unclaimedEthWei.Add(unclaimedEthWei, &intervalInfo.SmoothingPoolEthAmount.Int) + unclaimedEthWei.Add(unclaimedEthWei, &intervalInfo.TotalEthAmount.Int) } } diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index c651f919a..ef0536e46 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -206,6 +206,8 @@ type IntervalInfo struct { CollateralRplAmount *QuotedBigInt `json:"collateralRplAmount"` ODaoRplAmount *QuotedBigInt `json:"oDaoRplAmount"` SmoothingPoolEthAmount *QuotedBigInt `json:"smoothingPoolEthAmount"` + VoterShareEth *QuotedBigInt `json:"voterShareEth"` + TotalEthAmount *QuotedBigInt `json:"totalEthAmount"` MerkleProof []common.Hash `json:"merkleProof"` TotalNodeWeight *big.Int `json:"-"` diff --git a/shared/services/rewards/utils.go b/shared/services/rewards/utils.go index f264be956..37b77368d 100644 --- a/shared/services/rewards/utils.go +++ 
b/shared/services/rewards/utils.go @@ -137,6 +137,8 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.CollateralRplAmount = &QuotedBigInt{*proofWrapper.GetNodeCollateralRpl(nodeAddress)} info.ODaoRplAmount = &QuotedBigInt{*proofWrapper.GetNodeOracleDaoRpl(nodeAddress)} info.SmoothingPoolEthAmount = &QuotedBigInt{*proofWrapper.GetNodeSmoothingPoolEth(nodeAddress)} + info.VoterShareEth = &QuotedBigInt{*proofWrapper.GetNodeVoterShareEth(nodeAddress)} + info.TotalEthAmount = &QuotedBigInt{*proofWrapper.GetNodeEth(nodeAddress)} proof, err := proofWrapper.GetMerkleProof(nodeAddress) if proof == nil { From b991b484f008797e8b7a45c90fb7bf2c50e2d880 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 1 Aug 2025 15:54:01 -0400 Subject: [PATCH 14/33] Calculate voter share --- shared/services/rewards/generator-impl-v11.go | 94 ++++++++++++++++--- shared/services/rewards/types.go | 4 + 2 files changed, 87 insertions(+), 11 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 4c52e192c..92e6f116c 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -646,6 +646,7 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) r.nodeRewards[nodeInfo.Address] = rewardsForNode } rewardsForNode.SmoothingPoolEth.Add(rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + rewardsForNode.VoterShareEth.Add(rewardsForNode.VoterShareEth.Int, nodeInfo.VoterShareEth) // Add minipool rewards to the JSON for _, minipoolInfo := range nodeInfo.Minipools { @@ -766,14 +767,57 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { var err error bonusScalar := big.NewInt(0).Set(oneEth) + voterEth := big.NewInt(0) + pdaoEth := big.NewInt(0) + + // If pdao score is greater than 0, calculate the pdao share + if r.totalPdaoScore.Cmp(common.Big0) > 0 { + 
pdaoEth.Mul(r.smoothingPoolBalance, r.totalPdaoScore) + pdaoEth.Div(pdaoEth, big.NewInt(int64(r.successfulAttestations))) + pdaoEth.Div(pdaoEth, oneEth) + } + + // If voter score is greater than 0, calculate the voter share + if r.totalVoterScore.Cmp(common.Big0) > 0 { + voterEth.Mul(r.smoothingPoolBalance, r.totalVoterScore) + voterEth.Div(voterEth, big.NewInt(int64(r.successfulAttestations))) + voterEth.Div(voterEth, oneEth) + } + + trueVoterEth := big.NewInt(0) + totalMegapoolVoteEligibleRpl := big.NewInt(0) + for _, nodeInfo := range r.nodeDetails { + // Check if the node is eligible for voter share + if nodeInfo.Megapool == nil { + continue + } + totalMegapoolVoteEligibleRpl.Add(totalMegapoolVoteEligibleRpl, nodeInfo.MegapoolVoteEligibleRpl) + } + // Calculate the voter share for each node + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.Megapool == nil { + continue + } + if nodeInfo.MegapoolVoteEligibleRpl.Cmp(common.Big0) == 0 { + continue + } + + // The node's voter share is nodeRpl*voterEth/totalMegapoolVoteEligibleRpl + nodeInfo.VoterShareEth.Set(nodeInfo.MegapoolVoteEligibleRpl) + nodeInfo.VoterShareEth.Mul(nodeInfo.VoterShareEth, voterEth) + nodeInfo.VoterShareEth.Div(nodeInfo.VoterShareEth, totalMegapoolVoteEligibleRpl) + + trueVoterEth.Add(trueVoterEth, nodeInfo.VoterShareEth) + } + // If there weren't any successful attestations, everything goes to the pool stakers if r.totalAttestationScore.Cmp(common.Big0) == 0 || r.successfulAttestations == 0 { r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... 
sending the whole smoothing pool balance to the pool stakers.", r.totalAttestationScore.String(), r.successfulAttestations) return &nodeRewards{ poolStakerEth: r.smoothingPoolBalance, nodeOpEth: big.NewInt(0), - pdaoEth: big.NewInt(0), - voterEth: big.NewInt(0), + pdaoEth: pdaoEth, + voterEth: trueVoterEth, bonusScalar: bonusScalar, }, nil } @@ -855,14 +899,21 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { // This is how much actually goes to the pool stakers - it should ideally be equal to poolStakerShare but this accounts for any cumulative floating point errors truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + truePoolStakerAmount.Sub(truePoolStakerAmount, pdaoEth) + truePoolStakerAmount.Sub(truePoolStakerAmount, trueVoterEth) // Calculate the staking pool share and the node op share poolStakerShareBeforeBonuses := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare) + poolStakerShareBeforeBonuses.Sub(poolStakerShareBeforeBonuses, pdaoEth) + poolStakerShareBeforeBonuses.Sub(poolStakerShareBeforeBonuses, trueVoterEth) r.log.Printlnf("%s Pool staker ETH before bonuses: %s (%.3f)", r.logPrefix, poolStakerShareBeforeBonuses.String(), eth.WeiToEth(poolStakerShareBeforeBonuses)) r.log.Printlnf("%s Pool staker ETH after bonuses: %s (%.3f)", r.logPrefix, truePoolStakerAmount.String(), eth.WeiToEth(truePoolStakerAmount)) r.log.Printlnf("%s Node Op ETH before bonuses: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare)) r.log.Printlnf("%s Node Op ETH after bonuses: %s (%.3f)", r.logPrefix, totalEthForMinipools.String(), eth.WeiToEth(totalEthForMinipools)) + r.log.Printlnf("%s PDAO ETH: %s (%.3f)", r.logPrefix, pdaoEth.String(), eth.WeiToEth(pdaoEth)) + r.log.Printlnf("%s Voter Eth before distribution: %s (%.3f)", r.logPrefix, voterEth.String(), eth.WeiToEth(voterEth)) + r.log.Printlnf("%s Voter Eth after distribution: %s (%.3f)", r.logPrefix, 
trueVoterEth.String(), eth.WeiToEth(trueVoterEth)) r.log.Printlnf("%s (error = %s wei)", r.logPrefix, delta.String()) r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String()) @@ -870,8 +921,8 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { poolStakerEth: truePoolStakerAmount, nodeOpEth: totalEthForMinipools, bonusScalar: bonusScalar, - pdaoEth: big.NewInt(0), - voterEth: big.NewInt(0), + pdaoEth: pdaoEth, + voterEth: trueVoterEth, }, nil } @@ -1392,13 +1443,15 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { wg.Go(func() error { nativeNodeDetails := r.networkState.NodeDetails[iterationIndex] nodeDetails := &NodeSmoothingDetails{ - Index: iterationIndex, - Address: nativeNodeDetails.NodeAddress, - Minipools: []*MinipoolInfo{}, - SmoothingPoolEth: big.NewInt(0), - BonusEth: big.NewInt(0), - RewardsNetwork: nativeNodeDetails.RewardNetwork.Uint64(), - RplStake: nativeNodeDetails.RplStake, + Index: iterationIndex, + Address: nativeNodeDetails.NodeAddress, + Minipools: []*MinipoolInfo{}, + SmoothingPoolEth: big.NewInt(0), + BonusEth: big.NewInt(0), + RewardsNetwork: nativeNodeDetails.RewardNetwork.Uint64(), + RplStake: nativeNodeDetails.RplStake, + MegapoolVoteEligibleRpl: big.NewInt(0), + VoterShareEth: big.NewInt(0), } nodeDetails.IsOptedIn = nativeNodeDetails.SmoothingPoolRegistrationState @@ -1477,6 +1530,25 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { mpInfo.Validators = append(mpInfo.Validators, v) mpInfo.ValidatorIndexMap[v.Index] = v + + // Check if the megapool has staked RPL + if nativeNodeDetails.MegapoolStakedRPL.Sign() > 0 { + // The megapool's eligible staked RPL is defined by + // min(1.5*RPL value of megapool bonded_eth, megapool staked rpl) + bondedEth := nativeNodeDetails.EthBonded + rplPrice := r.networkState.NetworkDetails.RplPrice + // Price is eth per rpl, so to calculate the rpl value of the bonded 
eth, + // we need to divide the bonded eth by the price. This nukes the 1eth unit, so + // multiply by 1.5 eth first. + bondedEthRplValue := big.NewInt(0).Mul(bondedEth, big.NewInt(15e17)) + bondedEthRplValue.Div(bondedEthRplValue, rplPrice) + // Now take the minimum of the node's actual rpl vs bondedEthRplValue + if nativeNodeDetails.RplStake.Cmp(bondedEthRplValue) < 0 { + nodeDetails.MegapoolVoteEligibleRpl = nativeNodeDetails.RplStake + } else { + nodeDetails.MegapoolVoteEligibleRpl = bondedEthRplValue + } + } } nodeDetails.Megapool = mpInfo diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index ef0536e46..7e692f4c1 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -345,6 +345,10 @@ type NodeSmoothingDetails struct { BonusEth *big.Int EligibleBorrowedEth *big.Int RplStake *big.Int + + // v11 Fields + MegapoolVoteEligibleRpl *big.Int + VoterShareEth *big.Int } type QuotedBigInt struct { From e224a9b1e63339d60685aad6eb33f2676acf9ede Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 1 Aug 2025 15:55:09 -0400 Subject: [PATCH 15/33] Remove unused stub --- shared/services/rewards/generator-impl-v11.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 92e6f116c..b6815ce1b 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -468,11 +468,6 @@ func (r *treeGeneratorImpl_v11) calculateRplRewards() error { } -// Calculate the voter rewards -func (r *treeGeneratorImpl_v11) calculateVoterRewards() error { - return nil -} - // Calculates the ETH rewards for the given interval func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) error { From 8b256e5b0831de5758f87d94340c0c0fcea7d228 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 12 Aug 2025 13:04:35 -0400 Subject: [PATCH 16/33] Update sszfile_v2 to support 
new merkle tree format --- .../rewards/ssz_types/rewards-file-v5.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/shared/services/rewards/ssz_types/rewards-file-v5.go b/shared/services/rewards/ssz_types/rewards-file-v5.go index a3547001c..e394dc020 100644 --- a/shared/services/rewards/ssz_types/rewards-file-v5.go +++ b/shared/services/rewards/ssz_types/rewards-file-v5.go @@ -210,18 +210,23 @@ func (f *SSZFile_v2) Proofs() (map[Address]MerkleProof, error) { rplBytes := make([]byte, 32) rplBytes = rpl.FillBytes(rplBytes) - eth := stdbig.NewInt(0) - eth.Add(eth, nr.SmoothingPoolEth.Int) - eth.Add(eth, nr.VoterShareEth.Int) - ethBytes := make([]byte, 32) - ethBytes = eth.FillBytes(ethBytes) + smoothingEth := stdbig.NewInt(0) + smoothingEth.Add(smoothingEth, nr.SmoothingPoolEth.Int) + smoothingEthBytes := make([]byte, 32) + smoothingEthBytes = smoothingEth.FillBytes(smoothingEthBytes) - const dataSize = 20 + 32*3 + voterShareEth := stdbig.NewInt(0) + voterShareEth.Add(voterShareEth, nr.VoterShareEth.Int) + voterShareEthBytes := make([]byte, 32) + voterShareEthBytes = voterShareEth.FillBytes(voterShareEthBytes) + + const dataSize = 20 + 32*4 nodeData := make([]byte, dataSize) copy(nodeData[0:20], address[:]) copy(nodeData[20:20+32], network[:]) copy(nodeData[20+32:20+32*2], rplBytes[:]) - copy(nodeData[20+32*2:20+32*3], ethBytes[:]) + copy(nodeData[20+32*2:20+32*3], smoothingEthBytes[:]) + copy(nodeData[20+32*3:20+32*4], voterShareEthBytes[:]) treeData = append(treeData, nodeData) nodeDataMap[nr.Address] = nodeData From 8b92159f9cb72c81e813faf1a8f13c0533d1d65c Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 12 Aug 2025 13:16:31 -0400 Subject: [PATCH 17/33] Copy legacy distributor bindings --- .../v1.3.1/rewards/distributor-mainnet.go | 90 +++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 bindings/legacy/v1.3.1/rewards/distributor-mainnet.go diff --git 
a/bindings/legacy/v1.3.1/rewards/distributor-mainnet.go b/bindings/legacy/v1.3.1/rewards/distributor-mainnet.go new file mode 100644 index 000000000..ff0e4c829 --- /dev/null +++ b/bindings/legacy/v1.3.1/rewards/distributor-mainnet.go @@ -0,0 +1,90 @@ +package rewards + +import ( + "fmt" + "math/big" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/rocket-pool/smartnode/bindings/rocketpool" +) + +// Check if the given node has already claimed rewards for the given interval +func IsClaimed(rp *rocketpool.RocketPool, index *big.Int, claimerAddress common.Address, opts *bind.CallOpts) (bool, error) { + rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, opts) + if err != nil { + return false, err + } + isClaimed := new(bool) + if err := rocketDistributorMainnet.Call(opts, isClaimed, "isClaimed", index, claimerAddress); err != nil { + return false, fmt.Errorf("error getting rewards claim status for interval %s, node %s: %w", index.String(), claimerAddress.Hex(), err) + } + return *isClaimed, nil +} + +// Get the Merkle root for an interval +func MerkleRoots(rp *rocketpool.RocketPool, interval *big.Int, opts *bind.CallOpts) ([]byte, error) { + rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, opts) + if err != nil { + return nil, err + } + bytes := new([32]byte) + if err := rocketDistributorMainnet.Call(opts, bytes, "merkleRoots", interval); err != nil { + return nil, fmt.Errorf("error getting Merkle root for interval %s: %w", interval.String(), err) + } + return (*bytes)[:], nil +} + +// Estimate claim rewards gas +func EstimateClaimGas(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { + rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) + if err != nil { + return rocketpool.GasInfo{}, err + } + return 
rocketDistributorMainnet.GetTransactionGasInfo(opts, "claim", address, indices, amountRPL, amountETH, merkleProofs) +} + +// Claim rewards +func Claim(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, opts *bind.TransactOpts) (common.Hash, error) { + rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) + if err != nil { + return common.Hash{}, err + } + tx, err := rocketDistributorMainnet.Transact(opts, "claim", address, indices, amountRPL, amountETH, merkleProofs) + if err != nil { + return common.Hash{}, fmt.Errorf("error claiming rewards: %w", err) + } + return tx.Hash(), nil +} + +// Estimate claim and restake rewards gas +func EstimateClaimAndStakeGas(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, stakeAmount *big.Int, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { + rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) + if err != nil { + return rocketpool.GasInfo{}, err + } + return rocketDistributorMainnet.GetTransactionGasInfo(opts, "claimAndStake", address, indices, amountRPL, amountETH, merkleProofs, stakeAmount) +} + +// Claim and restake rewards +func ClaimAndStake(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, stakeAmount *big.Int, opts *bind.TransactOpts) (common.Hash, error) { + rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) + if err != nil { + return common.Hash{}, err + } + tx, err := rocketDistributorMainnet.Transact(opts, "claimAndStake", address, indices, amountRPL, amountETH, merkleProofs, stakeAmount) + if err != nil { + return common.Hash{}, fmt.Errorf("error claiming rewards: %w", err) + } + return tx.Hash(), nil +} + +// Get contracts +var rocketDistributorMainnetLock sync.Mutex + +func 
getRocketDistributorMainnet(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*rocketpool.Contract, error) { + rocketDistributorMainnetLock.Lock() + defer rocketDistributorMainnetLock.Unlock() + return rp.GetContract("rocketMerkleDistributorMainnet", opts) +} From bc584337c9b88fb189bf6420f6264643c9d6e377 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 12 Aug 2025 13:31:39 -0400 Subject: [PATCH 18/33] Copy rewards event to legacy --- bindings/legacy/v1.3.1/rewards/rewards.go | 379 ++++++++++++++++++++++ 1 file changed, 379 insertions(+) create mode 100644 bindings/legacy/v1.3.1/rewards/rewards.go diff --git a/bindings/legacy/v1.3.1/rewards/rewards.go b/bindings/legacy/v1.3.1/rewards/rewards.go new file mode 100644 index 000000000..be382d6a2 --- /dev/null +++ b/bindings/legacy/v1.3.1/rewards/rewards.go @@ -0,0 +1,379 @@ +package rewards + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/rocket-pool/smartnode/bindings/rocketpool" + "github.com/rocket-pool/smartnode/bindings/utils/eth" +) + +const ( + rewardsSnapshotSubmittedNodeKey string = "rewards.snapshot.submitted.node.key" +) + +// Info for a rewards snapshot event +type RewardsEvent struct { + Index *big.Int + ExecutionBlock *big.Int + ConsensusBlock *big.Int + MerkleRoot common.Hash + MerkleTreeCID string + IntervalsPassed *big.Int + TreasuryRPL *big.Int + TrustedNodeRPL []*big.Int + NodeRPL []*big.Int + NodeETH []*big.Int + UserETH *big.Int + IntervalStartTime time.Time + IntervalEndTime time.Time + SubmissionTime time.Time +} + +// Struct for submitting the rewards for a checkpoint +type RewardSubmission struct { + RewardIndex *big.Int `json:"rewardIndex"` + ExecutionBlock *big.Int `json:"executionBlock"` + ConsensusBlock *big.Int `json:"consensusBlock"` + MerkleRoot [32]byte 
`json:"merkleRoot"` + MerkleTreeCID string `json:"merkleTreeCID"` + IntervalsPassed *big.Int `json:"intervalsPassed"` + TreasuryRPL *big.Int `json:"treasuryRPL"` + TrustedNodeRPL []*big.Int `json:"trustedNodeRPL"` + NodeRPL []*big.Int `json:"nodeRPL"` + NodeETH []*big.Int `json:"nodeETH"` + UserETH *big.Int `json:"userETH"` +} + +// Internal struct - this is the structure of what gets returned by the RewardSnapshot event +type rewardSnapshot struct { + RewardIndex *big.Int `json:"rewardIndex"` + Submission RewardSubmission `json:"submission"` + IntervalStartTime *big.Int `json:"intervalStartTime"` + IntervalEndTime *big.Int `json:"intervalEndTime"` + Time *big.Int `json:"time"` +} + +// Get the timestamp that the current rewards interval started +func GetClaimIntervalTimeStart(rp *rocketpool.RocketPool, opts *bind.CallOpts) (time.Time, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return time.Time{}, err + } + unixTime := new(*big.Int) + if err := rocketRewardsPool.Call(opts, unixTime, "getClaimIntervalTimeStart"); err != nil { + return time.Time{}, fmt.Errorf("error getting claim interval time start: %w", err) + } + return time.Unix((*unixTime).Int64(), 0), nil +} + +// Get the number of seconds in a claim interval +func GetClaimIntervalTime(rp *rocketpool.RocketPool, opts *bind.CallOpts) (time.Duration, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return 0, err + } + unixTime := new(*big.Int) + if err := rocketRewardsPool.Call(opts, unixTime, "getClaimIntervalTime"); err != nil { + return 0, fmt.Errorf("error getting claim interval time: %w", err) + } + return time.Duration((*unixTime).Int64()) * time.Second, nil +} + +// Get the percent of checkpoint rewards that goes to node operators +func GetNodeOperatorRewardsPercent(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return nil, 
err + } + perc := new(*big.Int) + if err := rocketRewardsPool.Call(opts, perc, "getClaimingContractPerc", "rocketClaimNode"); err != nil { + return nil, fmt.Errorf("error getting node operator rewards percent: %w", err) + } + return *perc, nil +} + +// Get the percent of checkpoint rewards that goes to ODAO members +func GetTrustedNodeOperatorRewardsPercent(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return nil, err + } + perc := new(*big.Int) + if err := rocketRewardsPool.Call(opts, perc, "getClaimingContractPerc", "rocketClaimTrustedNode"); err != nil { + return nil, fmt.Errorf("error getting trusted node operator rewards percent: %w", err) + } + return *perc, nil +} + +// Get the percent of checkpoint rewards that goes to the PDAO +func GetProtocolDaoRewardsPercent(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return nil, err + } + perc := new(*big.Int) + if err := rocketRewardsPool.Call(opts, perc, "getClaimingContractPerc", "rocketClaimDAO"); err != nil { + return nil, fmt.Errorf("error getting protocol DAO rewards percent: %w", err) + } + return *perc, nil +} + +// Get the amount of RPL rewards that will be provided to node operators +func GetPendingRPLRewards(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return nil, err + } + rewards := new(*big.Int) + if err := rocketRewardsPool.Call(opts, rewards, "getPendingRPLRewards"); err != nil { + return nil, fmt.Errorf("error getting pending RPL rewards: %w", err) + } + return *rewards, nil +} + +// Get the amount of ETH rewards that will be provided to node operators +func GetPendingETHRewards(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, 
opts) + if err != nil { + return nil, err + } + rewards := new(*big.Int) + if err := rocketRewardsPool.Call(opts, rewards, "getPendingETHRewards"); err != nil { + return nil, fmt.Errorf("error getting pending ETH rewards: %w", err) + } + return *rewards, nil +} + +// Check whether or not the given address has submitted for the given rewards interval +func GetTrustedNodeSubmitted(rp *rocketpool.RocketPool, nodeAddress common.Address, rewardsIndex uint64, opts *bind.CallOpts) (bool, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return false, err + } + + indexBig := big.NewInt(0).SetUint64(rewardsIndex) + hasSubmitted := new(bool) + if err := rocketRewardsPool.Call(opts, hasSubmitted, "getTrustedNodeSubmitted", nodeAddress, indexBig); err != nil { + return false, fmt.Errorf("error getting trusted node submission status: %w", err) + } + return *hasSubmitted, nil +} + +// Check whether or not the given address has submitted specific rewards info +func GetTrustedNodeSubmittedSpecificRewards(rp *rocketpool.RocketPool, nodeAddress common.Address, submission RewardSubmission, opts *bind.CallOpts) (bool, error) { + // NOTE: this doesn't have a view yet so we have to construct it manually, and RLP + stringTy, _ := abi.NewType("string", "string", nil) + addressTy, _ := abi.NewType("address", "address", nil) + + submissionTy, _ := abi.NewType("tuple", "struct RewardSubmission", []abi.ArgumentMarshaling{ + {Name: "rewardIndex", Type: "uint256"}, + {Name: "executionBlock", Type: "uint256"}, + {Name: "consensusBlock", Type: "uint256"}, + {Name: "merkleRoot", Type: "bytes32"}, + {Name: "merkleTreeCID", Type: "string"}, + {Name: "intervalsPassed", Type: "uint256"}, + {Name: "treasuryRPL", Type: "uint256"}, + {Name: "trustedNodeRPL", Type: "uint256[]"}, + {Name: "nodeRPL", Type: "uint256[]"}, + {Name: "nodeETH", Type: "uint256[]"}, + {Name: "userETH", Type: "uint256"}, + }) + + args := abi.Arguments{ + {Type: stringTy, Name: "key"}, + {Type: 
addressTy, Name: "trustedNodeAddress"}, + {Type: submissionTy, Name: "submission"}, + } + + bytes, err := args.Pack(rewardsSnapshotSubmittedNodeKey, nodeAddress, &submission) + if err != nil { + return false, fmt.Errorf("error encoding submission data into ABI format: %w", err) + } + + key := crypto.Keccak256Hash(bytes) + result, err := rp.RocketStorage.GetBool(opts, key) + if err != nil { + return false, fmt.Errorf("error checking if trusted node submitted specific rewards: %w", err) + } + return result, nil +} + +// Estimate the gas for submitting a Merkle Tree-based snapshot for a rewards interval +func EstimateSubmitRewardSnapshotGas(rp *rocketpool.RocketPool, submission RewardSubmission, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, nil) + if err != nil { + return rocketpool.GasInfo{}, err + } + return rocketRewardsPool.GetTransactionGasInfo(opts, "submitRewardSnapshot", submission) +} + +// Submit a Merkle Tree-based snapshot for a rewards interval +func SubmitRewardSnapshot(rp *rocketpool.RocketPool, submission RewardSubmission, opts *bind.TransactOpts) (common.Hash, error) { + rocketRewardsPool, err := getRocketRewardsPool(rp, nil) + if err != nil { + return common.Hash{}, err + } + tx, err := rocketRewardsPool.Transact(opts, "submitRewardSnapshot", submission) + if err != nil { + return common.Hash{}, fmt.Errorf("error submitting rewards snapshot: %w", err) + } + return tx.Hash(), nil +} + +// Get the event info for a rewards snapshot using the Atlas getter +func GetRewardsEvent(rp *rocketpool.RocketPool, index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, RewardsEvent, error) { + // Check if the client is requesting interval 0 on mainnet, then return the hardcoded RewardsEvent + data, ok, err := getMainnetInterval0RewardsEvent(rp, index) + if err != nil { + return false, RewardsEvent{}, err + } + if ok { + return true, data, nil + } + + // Get contracts + 
rocketRewardsPool, err := getRocketRewardsPool(rp, opts) + if err != nil { + return false, RewardsEvent{}, err + } + + // Get the block that the event was emitted on + indexBig := big.NewInt(0).SetUint64(index) + blockWrapper := new(*big.Int) + if err := rocketRewardsPool.Call(opts, blockWrapper, "getClaimIntervalExecutionBlock", indexBig); err != nil { + return false, RewardsEvent{}, fmt.Errorf("error getting the event block for interval %d: %w", index, err) + } + block := *blockWrapper + + // Create the list of addresses to check + currentAddress := *rocketRewardsPool.Address + if rocketRewardsPoolAddresses == nil { + rocketRewardsPoolAddresses = []common.Address{currentAddress} + } else { + found := false + for _, address := range rocketRewardsPoolAddresses { + if address == currentAddress { + found = true + break + } + } + if !found { + rocketRewardsPoolAddresses = append(rocketRewardsPoolAddresses, currentAddress) + } + } + + // Construct a filter query for relevant logs + rewardsSnapshotEvent := rocketRewardsPool.ABI.Events["RewardSnapshot"] + indexBytes := [32]byte{} + indexBig.FillBytes(indexBytes[:]) + addressFilter := rocketRewardsPoolAddresses + topicFilter := [][]common.Hash{{rewardsSnapshotEvent.ID}, {indexBytes}} + + // Get the event logs + logs, err := eth.GetLogs(rp, addressFilter, topicFilter, big.NewInt(1), block, block, nil) + if err != nil { + return false, RewardsEvent{}, err + } + if len(logs) == 0 { + return false, RewardsEvent{}, nil + } + + // Get the log info values + values, err := rewardsSnapshotEvent.Inputs.Unpack(logs[0].Data) + if err != nil { + return false, RewardsEvent{}, fmt.Errorf("error unpacking rewards snapshot event data: %w", err) + } + + // Convert to a native struct + var snapshot rewardSnapshot + err = rewardsSnapshotEvent.Inputs.Copy(&snapshot, values) + if err != nil { + return false, RewardsEvent{}, fmt.Errorf("error converting rewards snapshot event data to struct: %w", err) + } + + // Get the decoded data + 
submission := snapshot.Submission + eventData := RewardsEvent{ + Index: indexBig, + ExecutionBlock: submission.ExecutionBlock, + ConsensusBlock: submission.ConsensusBlock, + IntervalsPassed: submission.IntervalsPassed, + TreasuryRPL: submission.TreasuryRPL, + TrustedNodeRPL: submission.TrustedNodeRPL, + NodeRPL: submission.NodeRPL, + NodeETH: submission.NodeETH, + UserETH: submission.UserETH, + MerkleRoot: submission.MerkleRoot, + MerkleTreeCID: submission.MerkleTreeCID, + IntervalStartTime: time.Unix(snapshot.IntervalStartTime.Int64(), 0), + IntervalEndTime: time.Unix(snapshot.IntervalEndTime.Int64(), 0), + SubmissionTime: time.Unix(snapshot.Time.Int64(), 0), + } + + // Convert v1.1.0-rc1 events to modern ones + if eventData.UserETH == nil { + eventData.UserETH = big.NewInt(0) + } + + return true, eventData, nil +} + +// Check if the client is requesting interval 0 on mainnet, then return the hardcoded RewardsEvent +func getMainnetInterval0RewardsEvent(rp *rocketpool.RocketPool, index uint64) (RewardsEvent, bool, error) { + if index != 0 { + return RewardsEvent{}, false, nil + } + // Check if the ec is synced to mainnet + chainID, err := rp.Client.ChainID(context.Background()) + if err != nil { + return RewardsEvent{}, false, fmt.Errorf("error getting chainID: %w", err) + } + if chainID.Cmp(big.NewInt(1)) != 0 { + return RewardsEvent{}, false, nil + } + + // Hardcoded RewardsEvent for interval 0 on mainnet + treasuryRPL := new(big.Int) + treasuryRPL.SetString("10633670478560109530497", 10) + trustedNodeRPL := new(big.Int) + trustedNodeRPL.SetString("10633670478560109529794", 10) + nodeRPL := new(big.Int) + nodeRPL.SetString("49623795566613844471758", 10) + + eventDataInterval_0 := RewardsEvent{ + Index: big.NewInt(0), + ExecutionBlock: big.NewInt(15451078), + ConsensusBlock: big.NewInt(4598879), + IntervalsPassed: big.NewInt(1), + TreasuryRPL: treasuryRPL, + TrustedNodeRPL: []*big.Int{trustedNodeRPL}, + NodeRPL: []*big.Int{nodeRPL}, + NodeETH: 
[]*big.Int{big.NewInt(0)}, + UserETH: big.NewInt(0), + MerkleRoot: common.HexToHash("0xb839fa0f5842bf3c8f19091361889fb0f1cb399d64b8da476d372b7de7a93463"), + MerkleTreeCID: "bafybeidrck3sz24acv32h56xdb7ruarxq52oci32del7moxqtief3do73y", + IntervalStartTime: time.Unix(1659591339, 0), + IntervalEndTime: time.Unix(1662010539, 0), + SubmissionTime: time.Unix(1662011717, 0), + } + + return eventDataInterval_0, true, nil +} + +// Get contracts +var rocketRewardsPoolLock sync.Mutex + +func getRocketRewardsPool(rp *rocketpool.RocketPool, opts *bind.CallOpts) (*rocketpool.Contract, error) { + rocketRewardsPoolLock.Lock() + defer rocketRewardsPoolLock.Unlock() + return rp.GetContract("rocketRewardsPool", opts) +} From 78d664f693194943431464a413ec1d45688d0282 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 12 Aug 2025 13:54:53 -0400 Subject: [PATCH 19/33] Remove CID from rewards struct, switch to new claims system --- bindings/legacy/v1.3.1/rewards/rewards.go | 9 ++-- bindings/rewards/distributor-mainnet.go | 17 ++++---- bindings/rewards/rewards.go | 13 ++---- bindings/types/claim.go | 18 ++++++++ rocketpool-cli/node/claim-rewards.go | 12 ++++-- rocketpool/api/node/claim-rewards.go | 50 +++++++++++++---------- shared/services/rewards/types.go | 1 - shared/services/rewards/utils.go | 5 --- 8 files changed, 70 insertions(+), 55 deletions(-) create mode 100644 bindings/types/claim.go diff --git a/bindings/legacy/v1.3.1/rewards/rewards.go b/bindings/legacy/v1.3.1/rewards/rewards.go index be382d6a2..6879ef2ad 100644 --- a/bindings/legacy/v1.3.1/rewards/rewards.go +++ b/bindings/legacy/v1.3.1/rewards/rewards.go @@ -232,7 +232,7 @@ func SubmitRewardSnapshot(rp *rocketpool.RocketPool, submission RewardSubmission // Get the event info for a rewards snapshot using the Atlas getter func GetRewardsEvent(rp *rocketpool.RocketPool, index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, RewardsEvent, error) { // Check if the client is requesting interval 
0 on mainnet, then return the hardcoded RewardsEvent - data, ok, err := getMainnetInterval0RewardsEvent(rp, index) + data, ok, err := getMainnetInterval0RewardsEvent(rp) if err != nil { return false, RewardsEvent{}, err } @@ -327,11 +327,8 @@ func GetRewardsEvent(rp *rocketpool.RocketPool, index uint64, rocketRewardsPoolA return true, eventData, nil } -// Check if the client is requesting interval 0 on mainnet, then return the hardcoded RewardsEvent -func getMainnetInterval0RewardsEvent(rp *rocketpool.RocketPool, index uint64) (RewardsEvent, bool, error) { - if index != 0 { - return RewardsEvent{}, false, nil - } +// return the hardcoded RewardsEvent +func getMainnetInterval0RewardsEvent(rp *rocketpool.RocketPool) (RewardsEvent, bool, error) { // Check if the ec is synced to mainnet chainID, err := rp.Client.ChainID(context.Background()) if err != nil { diff --git a/bindings/rewards/distributor-mainnet.go b/bindings/rewards/distributor-mainnet.go index ff0e4c829..b8743fb2e 100644 --- a/bindings/rewards/distributor-mainnet.go +++ b/bindings/rewards/distributor-mainnet.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/rocket-pool/smartnode/bindings/rocketpool" + "github.com/rocket-pool/smartnode/bindings/types" ) // Check if the given node has already claimed rewards for the given interval @@ -37,21 +38,21 @@ func MerkleRoots(rp *rocketpool.RocketPool, interval *big.Int, opts *bind.CallOp } // Estimate claim rewards gas -func EstimateClaimGas(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { +func EstimateClaimGas(rp *rocketpool.RocketPool, address common.Address, claims types.Claims, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) if err != nil { return 
rocketpool.GasInfo{}, err } - return rocketDistributorMainnet.GetTransactionGasInfo(opts, "claim", address, indices, amountRPL, amountETH, merkleProofs) + return rocketDistributorMainnet.GetTransactionGasInfo(opts, "claim", address, claims) } // Claim rewards -func Claim(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, opts *bind.TransactOpts) (common.Hash, error) { +func Claim(rp *rocketpool.RocketPool, address common.Address, claims types.Claims, opts *bind.TransactOpts) (common.Hash, error) { rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) if err != nil { return common.Hash{}, err } - tx, err := rocketDistributorMainnet.Transact(opts, "claim", address, indices, amountRPL, amountETH, merkleProofs) + tx, err := rocketDistributorMainnet.Transact(opts, "claim", address, claims) if err != nil { return common.Hash{}, fmt.Errorf("error claiming rewards: %w", err) } @@ -59,21 +60,21 @@ func Claim(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int } // Estimate claim and restake rewards gas -func EstimateClaimAndStakeGas(rp *rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, stakeAmount *big.Int, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { +func EstimateClaimAndStakeGas(rp *rocketpool.RocketPool, address common.Address, claims types.Claims, stakeAmount *big.Int, opts *bind.TransactOpts) (rocketpool.GasInfo, error) { rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) if err != nil { return rocketpool.GasInfo{}, err } - return rocketDistributorMainnet.GetTransactionGasInfo(opts, "claimAndStake", address, indices, amountRPL, amountETH, merkleProofs, stakeAmount) + return rocketDistributorMainnet.GetTransactionGasInfo(opts, "claimAndStake", address, claims, stakeAmount) } // Claim and restake rewards -func ClaimAndStake(rp 
*rocketpool.RocketPool, address common.Address, indices []*big.Int, amountRPL []*big.Int, amountETH []*big.Int, merkleProofs [][]common.Hash, stakeAmount *big.Int, opts *bind.TransactOpts) (common.Hash, error) { +func ClaimAndStake(rp *rocketpool.RocketPool, address common.Address, claims types.Claims, stakeAmount *big.Int, opts *bind.TransactOpts) (common.Hash, error) { rocketDistributorMainnet, err := getRocketDistributorMainnet(rp, nil) if err != nil { return common.Hash{}, err } - tx, err := rocketDistributorMainnet.Transact(opts, "claimAndStake", address, indices, amountRPL, amountETH, merkleProofs, stakeAmount) + tx, err := rocketDistributorMainnet.Transact(opts, "claimAndStake", address, claims, stakeAmount) if err != nil { return common.Hash{}, fmt.Errorf("error claiming rewards: %w", err) } diff --git a/bindings/rewards/rewards.go b/bindings/rewards/rewards.go index be382d6a2..0d7c79f08 100644 --- a/bindings/rewards/rewards.go +++ b/bindings/rewards/rewards.go @@ -26,9 +26,9 @@ type RewardsEvent struct { ExecutionBlock *big.Int ConsensusBlock *big.Int MerkleRoot common.Hash - MerkleTreeCID string IntervalsPassed *big.Int TreasuryRPL *big.Int + TreasuryETH *big.Int TrustedNodeRPL []*big.Int NodeRPL []*big.Int NodeETH []*big.Int @@ -232,7 +232,7 @@ func SubmitRewardSnapshot(rp *rocketpool.RocketPool, submission RewardSubmission // Get the event info for a rewards snapshot using the Atlas getter func GetRewardsEvent(rp *rocketpool.RocketPool, index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, RewardsEvent, error) { // Check if the client is requesting interval 0 on mainnet, then return the hardcoded RewardsEvent - data, ok, err := getMainnetInterval0RewardsEvent(rp, index) + data, ok, err := getMainnetInterval0RewardsEvent(rp) if err != nil { return false, RewardsEvent{}, err } @@ -313,7 +313,6 @@ func GetRewardsEvent(rp *rocketpool.RocketPool, index uint64, rocketRewardsPoolA NodeETH: submission.NodeETH, UserETH: 
submission.UserETH, MerkleRoot: submission.MerkleRoot, - MerkleTreeCID: submission.MerkleTreeCID, IntervalStartTime: time.Unix(snapshot.IntervalStartTime.Int64(), 0), IntervalEndTime: time.Unix(snapshot.IntervalEndTime.Int64(), 0), SubmissionTime: time.Unix(snapshot.Time.Int64(), 0), @@ -327,11 +326,8 @@ func GetRewardsEvent(rp *rocketpool.RocketPool, index uint64, rocketRewardsPoolA return true, eventData, nil } -// Check if the client is requesting interval 0 on mainnet, then return the hardcoded RewardsEvent -func getMainnetInterval0RewardsEvent(rp *rocketpool.RocketPool, index uint64) (RewardsEvent, bool, error) { - if index != 0 { - return RewardsEvent{}, false, nil - } +// return the hardcoded RewardsEvent +func getMainnetInterval0RewardsEvent(rp *rocketpool.RocketPool) (RewardsEvent, bool, error) { // Check if the ec is synced to mainnet chainID, err := rp.Client.ChainID(context.Background()) if err != nil { @@ -360,7 +356,6 @@ func getMainnetInterval0RewardsEvent(rp *rocketpool.RocketPool, index uint64) (R NodeETH: []*big.Int{big.NewInt(0)}, UserETH: big.NewInt(0), MerkleRoot: common.HexToHash("0xb839fa0f5842bf3c8f19091361889fb0f1cb399d64b8da476d372b7de7a93463"), - MerkleTreeCID: "bafybeidrck3sz24acv32h56xdb7ruarxq52oci32del7moxqtief3do73y", IntervalStartTime: time.Unix(1659591339, 0), IntervalEndTime: time.Unix(1662010539, 0), SubmissionTime: time.Unix(1662011717, 0), diff --git a/bindings/types/claim.go b/bindings/types/claim.go new file mode 100644 index 000000000..0bc6ce4a3 --- /dev/null +++ b/bindings/types/claim.go @@ -0,0 +1,18 @@ +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// Claim struct +type Claim struct { + Index *big.Int + AmountRPL *big.Int + AmountSmoothingETH *big.Int + AmountVoterShareETH *big.Int + Proof []common.Hash +} + +type Claims []Claim diff --git a/rocketpool-cli/node/claim-rewards.go b/rocketpool-cli/node/claim-rewards.go index 839e160e5..5c0c45646 100644 --- 
a/rocketpool-cli/node/claim-rewards.go +++ b/rocketpool-cli/node/claim-rewards.go @@ -33,7 +33,8 @@ func nodeClaimRewards(c *cli.Context) error { defer rp.Close() // Provide a notice - fmt.Printf("%sWelcome to the new rewards system!\nYou no longer need to claim rewards at each interval - you can simply let them accumulate and claim them whenever you want.\nHere you can see which intervals you haven't claimed yet, and how many rewards you earned during each one.%s\n\n", colorBlue, colorReset) + fmt.Printf("%sWelcome to the new rewards system!\nYou no longer need to claim rewards at each interval - you can simply let them accumulate and claim them whenever you want.\nHere you can see which intervals you haven't claimed yet, and how many rewards you earned during each one.%s\n", colorBlue, colorReset) + fmt.Println() // Get eligible intervals rewardsInfoResponse, err := rp.GetRewardsInfo() @@ -109,7 +110,8 @@ func nodeClaimRewards(c *cli.Context) error { // Print the info for all available periods totalRpl := big.NewInt(0) - totalEth := big.NewInt(0) + totalSmoothingEth := big.NewInt(0) + totalVoterShareEth := big.NewInt(0) for _, intervalInfo := range rewardsInfoResponse.UnclaimedIntervals { fmt.Printf("Rewards for Interval %d (%s to %s):\n", intervalInfo.Index, intervalInfo.StartTime.Local(), intervalInfo.EndTime.Local()) fmt.Printf("\tStaking: %.6f RPL\n", eth.WeiToEth(&intervalInfo.CollateralRplAmount.Int)) @@ -122,12 +124,14 @@ func nodeClaimRewards(c *cli.Context) error { totalRpl.Add(totalRpl, &intervalInfo.CollateralRplAmount.Int) totalRpl.Add(totalRpl, &intervalInfo.ODaoRplAmount.Int) - totalEth.Add(totalEth, &intervalInfo.TotalEthAmount.Int) + totalSmoothingEth.Add(totalSmoothingEth, &intervalInfo.SmoothingPoolEthAmount.Int) + totalVoterShareEth.Add(totalVoterShareEth, &intervalInfo.VoterShareEth.Int) } fmt.Println("Total Pending Rewards:") fmt.Printf("\t%.6f RPL\n", eth.WeiToEth(totalRpl)) - fmt.Printf("\t%.6f ETH\n\n", eth.WeiToEth(totalEth)) + 
fmt.Printf("\t%.6f Smoothing Pool ETH\n", eth.WeiToEth(totalSmoothingEth)) + fmt.Printf("\t%.6f Voter Share ETH\n\n", eth.WeiToEth(totalVoterShareEth)) // Get the list of intervals to claim var indices []uint64 diff --git a/rocketpool/api/node/claim-rewards.go b/rocketpool/api/node/claim-rewards.go index af88ab97c..19af9acfe 100644 --- a/rocketpool/api/node/claim-rewards.go +++ b/rocketpool/api/node/claim-rewards.go @@ -16,6 +16,7 @@ import ( "github.com/rocket-pool/smartnode/bindings/rewards" "github.com/rocket-pool/smartnode/bindings/rocketpool" "github.com/rocket-pool/smartnode/bindings/settings/protocol" + "github.com/rocket-pool/smartnode/bindings/types" "github.com/rocket-pool/smartnode/bindings/utils/eth" "github.com/rocket-pool/smartnode/shared/services" "github.com/rocket-pool/smartnode/shared/services/config" @@ -215,7 +216,7 @@ func canClaimRewards(c *cli.Context, indicesString string) (*api.CanNodeClaimRew } // Get the rewards - indices, amountRPL, amountETH, merkleProofs, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) + claims, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) if err != nil { return nil, err } @@ -225,7 +226,7 @@ func canClaimRewards(c *cli.Context, indicesString string) (*api.CanNodeClaimRew if err != nil { return nil, err } - gasInfo, err := rewards.EstimateClaimGas(rp, nodeAccount.Address, indices, amountRPL, amountETH, merkleProofs, opts) + gasInfo, err := rewards.EstimateClaimGas(rp, nodeAccount.Address, claims, opts) if err != nil { return nil, err } @@ -263,7 +264,7 @@ func claimRewards(c *cli.Context, indicesString string) (*api.NodeClaimRewardsRe } // Get the rewards - indices, amountRPL, amountETH, merkleProofs, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) + claims, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) if err != nil { return nil, err } @@ -281,7 +282,7 @@ func claimRewards(c *cli.Context, indicesString 
string) (*api.NodeClaimRewardsRe } // Claim rewards - hash, err := rewards.Claim(rp, nodeAccount.Address, indices, amountRPL, amountETH, merkleProofs, opts) + hash, err := rewards.Claim(rp, nodeAccount.Address, claims, opts) if err != nil { return nil, err } @@ -321,7 +322,7 @@ func canClaimAndStakeRewards(c *cli.Context, indicesString string, stakeAmount * } // Get the rewards - indices, amountRPL, amountETH, merkleProofs, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) + claims, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) if err != nil { return nil, err } @@ -331,7 +332,7 @@ func canClaimAndStakeRewards(c *cli.Context, indicesString string, stakeAmount * if err != nil { return nil, err } - gasInfo, err := rewards.EstimateClaimAndStakeGas(rp, nodeAccount.Address, indices, amountRPL, amountETH, merkleProofs, stakeAmount, opts) + gasInfo, err := rewards.EstimateClaimAndStakeGas(rp, nodeAccount.Address, claims, stakeAmount, opts) if err != nil { return nil, err } @@ -375,7 +376,7 @@ func claimAndStakeRewards(c *cli.Context, indicesString string, stakeAmount *big } // Get the rewards - indices, amountRPL, amountETH, merkleProofs, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) + claims, err := getRewardsForIntervals(rp, cfg, nodeAccount.Address, indicesString) if err != nil { return nil, err } @@ -387,7 +388,7 @@ func claimAndStakeRewards(c *cli.Context, indicesString string, stakeAmount *big } // Claim rewards - hash, err := rewards.ClaimAndStake(rp, nodeAccount.Address, indices, amountRPL, amountETH, merkleProofs, stakeAmount, opts) + hash, err := rewards.ClaimAndStake(rp, nodeAccount.Address, claims, stakeAmount, opts) if err != nil { return nil, err } @@ -399,7 +400,7 @@ func claimAndStakeRewards(c *cli.Context, indicesString string, stakeAmount *big } // Get the rewards for the provided interval indices -func getRewardsForIntervals(rp *rocketpool.RocketPool, cfg 
*config.RocketPoolConfig, nodeAddress common.Address, indicesString string) ([]*big.Int, []*big.Int, []*big.Int, [][]common.Hash, error) { +func getRewardsForIntervals(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, nodeAddress common.Address, indicesString string) (types.Claims, error) { // Get the indices seenIndices := map[uint64]bool{} @@ -408,7 +409,7 @@ func getRewardsForIntervals(rp *rocketpool.RocketPool, cfg *config.RocketPoolCon for _, element := range elements { index, err := strconv.ParseUint(element, 0, 64) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("cannot convert index %s to a number: %w", element, err) + return nil, fmt.Errorf("cannot convert index %s to a number: %w", element, err) } // Ignore duplicates _, exists := seenIndices[index] @@ -419,24 +420,22 @@ func getRewardsForIntervals(rp *rocketpool.RocketPool, cfg *config.RocketPoolCon } // Read the tree files to get the details - amountRPL := []*big.Int{} - amountETH := []*big.Int{} - merkleProofs := [][]common.Hash{} + claims := types.Claims{} // Populate the interval info for each one for _, index := range indices { intervalInfo, err := rprewards.GetIntervalInfo(rp, cfg, nodeAddress, index.Uint64(), nil) if err != nil { - return nil, nil, nil, nil, err + return nil, err } // Validate if !intervalInfo.TreeFileExists { - return nil, nil, nil, nil, fmt.Errorf("rewards tree file '%s' doesn't exist", intervalInfo.TreeFilePath) + return nil, fmt.Errorf("rewards tree file '%s' doesn't exist", intervalInfo.TreeFilePath) } if !intervalInfo.MerkleRootValid { - return nil, nil, nil, nil, fmt.Errorf("merkle root for rewards tree file '%s' doesn't match the canonical merkle root for interval %d", intervalInfo.TreeFilePath, index.Uint64()) + return nil, fmt.Errorf("merkle root for rewards tree file '%s' doesn't match the canonical merkle root for interval %d", intervalInfo.TreeFilePath, index.Uint64()) } // Get the rewards from it @@ -445,16 +444,23 @@ func getRewardsForIntervals(rp 
*rocketpool.RocketPool, cfg *config.RocketPoolCon rplForInterval.Add(rplForInterval, &intervalInfo.CollateralRplAmount.Int) rplForInterval.Add(rplForInterval, &intervalInfo.ODaoRplAmount.Int) - ethForInterval := big.NewInt(0) - ethForInterval.Add(ethForInterval, &intervalInfo.SmoothingPoolEthAmount.Int) + smoothingEthForInterval := big.NewInt(0) + smoothingEthForInterval.Add(smoothingEthForInterval, &intervalInfo.SmoothingPoolEthAmount.Int) + + voterShareEthForInterval := big.NewInt(0) + voterShareEthForInterval.Add(voterShareEthForInterval, &intervalInfo.VoterShareEth.Int) - amountRPL = append(amountRPL, rplForInterval) - amountETH = append(amountETH, ethForInterval) - merkleProofs = append(merkleProofs, intervalInfo.MerkleProof) + claims = append(claims, types.Claim{ + Index: index, + AmountRPL: rplForInterval, + AmountSmoothingETH: smoothingEthForInterval, + AmountVoterShareETH: voterShareEthForInterval, + Proof: intervalInfo.MerkleProof, + }) } } // Return - return indices, amountRPL, amountETH, merkleProofs, nil + return claims, nil } diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 7e692f4c1..8e9d8c9cf 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -199,7 +199,6 @@ type IntervalInfo struct { TreeFileExists bool `json:"treeFileExists"` MerkleRootValid bool `json:"merkleRootValid"` MerkleRoot common.Hash `json:"merkleRoot"` - CID string `json:"cid"` StartTime time.Time `json:"startTime"` EndTime time.Time `json:"endTime"` NodeExists bool `json:"nodeExists"` diff --git a/shared/services/rewards/utils.go b/shared/services/rewards/utils.go index 37b77368d..5c9de08d5 100644 --- a/shared/services/rewards/utils.go +++ b/shared/services/rewards/utils.go @@ -94,7 +94,6 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no return } - info.CID = event.MerkleTreeCID info.StartTime = event.IntervalStartTime info.EndTime = event.IntervalEndTime merkleRootCanon := 
event.MerkleRoot @@ -156,7 +155,6 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no // Downloads the rewards file for this interval func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemon bool) error { interval := i.Index - expectedCid := i.CID expectedRoot := i.MerkleRoot // Determine file name and path rewardsTreePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(interval, isDaemon, config.RewardsExtensionJSON)) @@ -164,12 +162,9 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo return fmt.Errorf("error expanding rewards tree path: %w", err) } rewardsTreeFilename := filepath.Base(rewardsTreePath) - ipfsFilename := rewardsTreeFilename + config.RewardsTreeIpfsExtension // Create URL list urls := []string{ - fmt.Sprintf(config.PrimaryRewardsFileUrl, expectedCid, ipfsFilename), - fmt.Sprintf(config.SecondaryRewardsFileUrl, expectedCid, ipfsFilename), fmt.Sprintf(config.GithubRewardsFileUrl, string(cfg.Smartnode.Network.Value.(cfgtypes.Network)), rewardsTreeFilename), } From 46784f7e8ee51e5289d9bc1a983c142cffcde956 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 12 Aug 2025 14:08:31 -0400 Subject: [PATCH 20/33] Fix tests --- shared/services/rewards/test/assets/assets.go | 1 - shared/services/rewards/test/mock.go | 1 - 2 files changed, 2 deletions(-) diff --git a/shared/services/rewards/test/assets/assets.go b/shared/services/rewards/test/assets/assets.go index 43a661c83..99c79a05a 100644 --- a/shared/services/rewards/test/assets/assets.go +++ b/shared/services/rewards/test/assets/assets.go @@ -97,7 +97,6 @@ func GetRewardSnapshotEventInterval19() rewards.RewardsEvent { ExecutionBlock: big.NewInt(19231284), ConsensusBlock: big.NewInt(8429279), MerkleRoot: common.HexToHash("0x35d1be64d49aa71dc5b5ea13dd6f91d8613c81aef2593796d6dee599cd228aea"), - MerkleTreeCID: "bafybeiazkzsqe7molppbhbxg2khdgocrip36eoezroa7anbe53za7mxjpq", IntervalsPassed: big.NewInt(1), 
TreasuryRPL: big.NewInt(0), // Set below TrustedNodeRPL: []*big.Int{}, // XXX Not set, but probably not needed diff --git a/shared/services/rewards/test/mock.go b/shared/services/rewards/test/mock.go index af1b0d452..606639f69 100644 --- a/shared/services/rewards/test/mock.go +++ b/shared/services/rewards/test/mock.go @@ -669,7 +669,6 @@ func (h *MockHistory) GetPreviousRewardSnapshotEvent() rprewards.RewardsEvent { ExecutionBlock: big.NewInt(int64(consensusEndBlock + h.BlockOffset)), ConsensusBlock: big.NewInt(int64(consensusEndBlock)), MerkleRoot: common.Hash{}, - MerkleTreeCID: "", IntervalsPassed: big.NewInt(1), TreasuryRPL: big.NewInt(0), TrustedNodeRPL: []*big.Int{}, From aafc29e4a583dd06c899d1d4680e5e41bb84dcc4 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Tue, 2 Sep 2025 12:53:02 -0400 Subject: [PATCH 21/33] Update test merkle root for v11- now includes separate voterShare --- shared/services/rewards/mock_v11_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index f62724eed..3313b78bd 100644 --- a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -369,12 +369,12 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { v11MerkleRoot := v11Artifacts.RewardsFile.GetMerkleRoot() // Expected merkle root: - // 0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b + // 0x6fc204154008bd9beefac7ded7009467eca0de9fa8e8b4f802d8f0fb7c56754a // // If this does not match, it implies either you updated the set of default mock nodes, // or you introduced a regression in treegen. // DO NOT update this value unless you know what you are doing. 
- expectedMerkleRoot := "0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b" + expectedMerkleRoot := "0x6fc204154008bd9beefac7ded7009467eca0de9fa8e8b4f802d8f0fb7c56754a" if !strings.EqualFold(v11MerkleRoot, expectedMerkleRoot) { t.Fatalf("Merkle root does not match expected value %s != %s", v11MerkleRoot, expectedMerkleRoot) } else { From 1358e3e66290068753f5ea751abf7526d15f1247 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Thu, 4 Sep 2025 17:49:12 -0400 Subject: [PATCH 22/33] Fix bugs found under test, update tests --- bindings/utils/state/network.go | 28 +- shared/services/rewards/generator-impl-v11.go | 145 +++++-- shared/services/rewards/mock_test.go | 2 +- shared/services/rewards/mock_v10_test.go | 2 +- shared/services/rewards/mock_v11_test.go | 382 ++++++++++++++++-- shared/services/rewards/ssz_types/json.go | 89 ++++ .../rewards/ssz_types/rewards-file-v5.go | 10 +- shared/services/rewards/test/beacon.go | 12 +- shared/services/rewards/test/mock.go | 293 +++++++++++++- shared/services/rewards/types.go | 3 + shared/services/state/network-state.go | 17 +- 11 files changed, 890 insertions(+), 93 deletions(-) diff --git a/bindings/utils/state/network.go b/bindings/utils/state/network.go index 9e1fd137f..737d92d45 100644 --- a/bindings/utils/state/network.go +++ b/bindings/utils/state/network.go @@ -18,6 +18,18 @@ const ( networkEffectiveStakeBatchSize int = 250 ) +type MegapoolRevenueSplitSettings struct { + NodeOperatorCommissionShare *big.Int `json:"node_operator_commission_share"` + NodeOperatorCommissionAdder *big.Int `json:"node_operator_commission_adder"` + VoterCommissionShare *big.Int `json:"voter_commission_share"` + PdaoCommissionShare *big.Int `json:"pdao_commission_share"` +} +type MegapoolRevenueSplitTimeWeightedAverages struct { + NodeShare *big.Int `json:"node_share"` + VoterShare *big.Int `json:"voter_share"` + PdaoShare *big.Int `json:"pdao_share"` +} + type NetworkDetails struct { // Redstone RplPrice *big.Int 
`json:"rpl_price"` @@ -67,18 +79,9 @@ type NetworkDetails struct { BalancesSubmissionFrequency uint64 `json:"balances_submission_frequency"` // Saturn - MegapoolRevenueSplitSettings struct { - NodeOperatorCommissionShare *big.Int `json:"node_operator_commission_share"` - NodeOperatorCommissionAdder *big.Int `json:"node_operator_commission_adder"` - VoterCommissionShare *big.Int `json:"voter_commission_share"` - PdaoCommissionShare *big.Int `json:"pdao_commission_share"` - } - - MegapoolRevenueSplitTimeWeightedAverages struct { - NodeShare *big.Int `json:"node_share"` - VoterShare *big.Int `json:"voter_share"` - PdaoShare *big.Int `json:"pdao_share"` - } + MegapoolRevenueSplitSettings MegapoolRevenueSplitSettings + MegapoolRevenueSplitTimeWeightedAverages MegapoolRevenueSplitTimeWeightedAverages + SmoothingPoolPendingVoterShare *big.Int `json:"smoothing_pool_earmarked_voter_share_eth"` } // Create a snapshot of all of the network's details @@ -161,6 +164,7 @@ func NewNetworkDetails(rp *rocketpool.RocketPool, contracts *NetworkContracts) ( contracts.Multicaller.AddCall(contracts.RocketNetworkRevenues, &details.MegapoolRevenueSplitTimeWeightedAverages.NodeShare, "getCurrentNodeShare") contracts.Multicaller.AddCall(contracts.RocketNetworkRevenues, &details.MegapoolRevenueSplitTimeWeightedAverages.VoterShare, "getCurrentVoterShare") contracts.Multicaller.AddCall(contracts.RocketNetworkRevenues, &details.MegapoolRevenueSplitTimeWeightedAverages.PdaoShare, "getCurrentProtocolDAOShare") + contracts.Multicaller.AddCall(contracts.RocketRewardsPool, &details.SmoothingPoolPendingVoterShare, "getPendingVoterShare") _, err := contracts.Multicaller.FlexibleCall(true, opts) if err != nil { diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index b6815ce1b..8b2895699 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -473,7 +473,8 @@ func (r *treeGeneratorImpl_v11) 
calculateEthRewards(checkBeaconPerformance bool) // Get the Smoothing Pool contract's balance r.smoothingPoolBalance = r.networkState.NetworkDetails.SmoothingPoolBalance - r.log.Printlnf("%s Smoothing Pool Balance: %s (%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) + r.log.Printlnf("%s Smoothing Pool Balance:\t%s\t(%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) + r.log.Printlnf("%s Earmarked Voter Share:\t%s\t(%.3f)", r.logPrefix, r.networkState.NetworkDetails.SmoothingPoolPendingVoterShare.String(), eth.WeiToEth(r.networkState.NetworkDetails.SmoothingPoolPendingVoterShare)) // Ignore the ETH calculation if there are no rewards if r.smoothingPoolBalance.Cmp(common.Big0) == 0 { @@ -621,6 +622,38 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) // Update the rewards maps for _, nodeInfo := range r.nodeDetails { + // First, take care of voter share + if nodeInfo.VoterShareEth.Cmp(common.Big0) > 0 { + rewardsForNode, exists := r.nodeRewards[nodeInfo.Address] + if !exists { + network := nodeInfo.RewardsNetwork + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[nodeInfo.Address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward_v2( + network, + ssz_types.AddressFromBytes(nodeInfo.Address.Bytes()), + ) + r.nodeRewards[nodeInfo.Address] = rewardsForNode + } + rewardsForNode.VoterShareEth.Add(rewardsForNode.VoterShareEth.Int, nodeInfo.VoterShareEth) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.SmoothingPoolEth.Add(rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + } 
+ + // Next, take care of smoothing pool ETH if nodeInfo.IsEligible && nodeInfo.SmoothingPoolEth.Cmp(common.Big0) > 0 { rewardsForNode, exists := r.nodeRewards[nodeInfo.Address] if !exists { @@ -641,7 +674,6 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) r.nodeRewards[nodeInfo.Address] = rewardsForNode } rewardsForNode.SmoothingPoolEth.Add(rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) - rewardsForNode.VoterShareEth.Add(rewardsForNode.VoterShareEth.Int, nodeInfo.VoterShareEth) // Add minipool rewards to the JSON for _, minipoolInfo := range nodeInfo.Minipools { @@ -765,21 +797,30 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { voterEth := big.NewInt(0) pdaoEth := big.NewInt(0) + smoothingPoolBalance := big.NewInt(0).Set(r.smoothingPoolBalance) + // Subtract the earmarked voter share + smoothingPoolBalance.Sub(smoothingPoolBalance, r.networkState.NetworkDetails.SmoothingPoolPendingVoterShare) + if smoothingPoolBalance.Sign() <= 0 { + return nil, fmt.Errorf("smoothing pool balance is less than or equal to the earmarked voter share") + } + // If pdao score is greater than 0, calculate the pdao share if r.totalPdaoScore.Cmp(common.Big0) > 0 { - pdaoEth.Mul(r.smoothingPoolBalance, r.totalPdaoScore) + pdaoEth.Mul(smoothingPoolBalance, r.totalPdaoScore) pdaoEth.Div(pdaoEth, big.NewInt(int64(r.successfulAttestations))) pdaoEth.Div(pdaoEth, oneEth) } // If voter score is greater than 0, calculate the voter share if r.totalVoterScore.Cmp(common.Big0) > 0 { - voterEth.Mul(r.smoothingPoolBalance, r.totalVoterScore) + voterEth.Mul(smoothingPoolBalance, r.totalVoterScore) voterEth.Div(voterEth, big.NewInt(int64(r.successfulAttestations))) voterEth.Div(voterEth, oneEth) + + // Add in the earmarked voter share + voterEth.Add(voterEth, r.networkState.NetworkDetails.SmoothingPoolPendingVoterShare) } - trueVoterEth := big.NewInt(0) totalMegapoolVoteEligibleRpl := big.NewInt(0) for _, nodeInfo := 
range r.nodeDetails { // Check if the node is eligible for voter share @@ -789,6 +830,7 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { totalMegapoolVoteEligibleRpl.Add(totalMegapoolVoteEligibleRpl, nodeInfo.MegapoolVoteEligibleRpl) } // Calculate the voter share for each node + trueVoterEth := big.NewInt(0) for _, nodeInfo := range r.nodeDetails { if nodeInfo.Megapool == nil { continue @@ -801,15 +843,17 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { nodeInfo.VoterShareEth.Set(nodeInfo.MegapoolVoteEligibleRpl) nodeInfo.VoterShareEth.Mul(nodeInfo.VoterShareEth, voterEth) nodeInfo.VoterShareEth.Div(nodeInfo.VoterShareEth, totalMegapoolVoteEligibleRpl) - trueVoterEth.Add(trueVoterEth, nodeInfo.VoterShareEth) } // If there weren't any successful attestations, everything goes to the pool stakers if r.totalAttestationScore.Cmp(common.Big0) == 0 || r.successfulAttestations == 0 { r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... 
sending the whole smoothing pool balance to the pool stakers.", r.totalAttestationScore.String(), r.successfulAttestations) + poolStakerEth := big.NewInt(0).Set(smoothingPoolBalance) + poolStakerEth.Sub(poolStakerEth, trueVoterEth) + poolStakerEth.Sub(poolStakerEth, pdaoEth) return &nodeRewards{ - poolStakerEth: r.smoothingPoolBalance, + poolStakerEth: poolStakerEth, nodeOpEth: big.NewInt(0), pdaoEth: pdaoEth, voterEth: trueVoterEth, @@ -828,8 +872,9 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { } totalEthForMinipools := big.NewInt(0) + totalEthForMegapools := big.NewInt(0) totalNodeOpShare := big.NewInt(0) - totalNodeOpShare.Mul(r.smoothingPoolBalance, r.totalAttestationScore) + totalNodeOpShare.Mul(smoothingPoolBalance, r.totalAttestationScore) totalNodeOpShare.Div(totalNodeOpShare, big.NewInt(int64(r.successfulAttestations))) totalNodeOpShare.Div(totalNodeOpShare, oneEth) @@ -853,10 +898,29 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, minipoolEth) } totalEthForMinipools.Add(totalEthForMinipools, nodeInfo.SmoothingPoolEth) + + // Check megapool eth as well + if nodeInfo.Megapool != nil { + for _, validator := range nodeInfo.Megapool.Validators { + validatorEth := big.NewInt(0).Set(totalNodeOpShare) + validatorEth.Mul(validatorEth, &validator.AttestationScore.Int) + validatorEth.Div(validatorEth, r.totalAttestationScore) + validator.MegapoolValidatorShare = validatorEth + nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, validatorEth) + + totalEthForMegapools.Add(totalEthForMegapools, validatorEth) + } + } } if r.rewardsFile.RulesetVersion >= 10 { + // NB: We use the raw smoothing pool balance here, not the adjusted one + // (r.smoothingPoolBalance instead of smoothingPoolBalance) + // Otherwise, when we subtract trueVoterEth, we subtract the earmarked voter share twice. 
remainingBalance := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + remainingBalance.Sub(remainingBalance, totalEthForMegapools) + remainingBalance.Sub(remainingBalance, pdaoEth) + remainingBalance.Sub(remainingBalance, trueVoterEth) if remainingBalance.Cmp(totalConsensusBonus) < 0 { r.log.Printlnf("WARNING: Remaining balance is less than total consensus bonus... Balance = %s, total consensus bonus = %s", remainingBalance.String(), totalConsensusBonus.String()) // Scale bonuses down to fit the remaining balance @@ -874,47 +938,60 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { mpd.MinipoolBonus.Div(mpd.MinipoolBonus, totalConsensusBonus) } } + } else { + r.log.Printlnf("%s Smoothing Pool has %s (%.3f) Pool Staker ETH before bonuses which is enough for %s (%.3f) in bonuses.", r.logPrefix, remainingBalance.String(), eth.WeiToEth(remainingBalance), totalConsensusBonus.String(), eth.WeiToEth(totalConsensusBonus)) } } - // Sanity check the totalNodeOpShare before bonuses are awarded - delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare) - delta.Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String()) - } - // Finally, award the bonuses + totalEthForBonuses := big.NewInt(0) if r.rewardsFile.RulesetVersion >= 10 { for _, nsd := range r.nodeDetails { nsd.SmoothingPoolEth.Add(nsd.SmoothingPoolEth, nsd.BonusEth) - totalEthForMinipools.Add(totalEthForMinipools, nsd.BonusEth) + totalEthForBonuses.Add(totalEthForBonuses, nsd.BonusEth) } } + trueNodeOperatorAmount := big.NewInt(0) + trueNodeOperatorAmount.Add(trueNodeOperatorAmount, totalEthForMinipools) + trueNodeOperatorAmount.Add(trueNodeOperatorAmount, totalEthForMegapools) + + delta := big.NewInt(0).Sub(trueNodeOperatorAmount, totalNodeOpShare) + delta.Abs(delta) + if 
delta.Cmp(r.epsilon) == 1 { + return nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", trueNodeOperatorAmount.String(), totalNodeOpShare.String(), delta.String()) + } + + trueNodeOperatorAmount.Add(trueNodeOperatorAmount, totalEthForBonuses) + // This is how much actually goes to the pool stakers - it should ideally be equal to poolStakerShare but this accounts for any cumulative floating point errors - truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + // NB: We use the raw smoothing pool balance here, not the adjusted one + // (r.smoothingPoolBalance instead of smoothingPoolBalance) + // Otherwise, when we subtract trueVoterEth, we subtract the earmarked voter share twice. + truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, trueNodeOperatorAmount) truePoolStakerAmount.Sub(truePoolStakerAmount, pdaoEth) truePoolStakerAmount.Sub(truePoolStakerAmount, trueVoterEth) - // Calculate the staking pool share and the node op share - poolStakerShareBeforeBonuses := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare) - poolStakerShareBeforeBonuses.Sub(poolStakerShareBeforeBonuses, pdaoEth) - poolStakerShareBeforeBonuses.Sub(poolStakerShareBeforeBonuses, trueVoterEth) - - r.log.Printlnf("%s Pool staker ETH before bonuses: %s (%.3f)", r.logPrefix, poolStakerShareBeforeBonuses.String(), eth.WeiToEth(poolStakerShareBeforeBonuses)) - r.log.Printlnf("%s Pool staker ETH after bonuses: %s (%.3f)", r.logPrefix, truePoolStakerAmount.String(), eth.WeiToEth(truePoolStakerAmount)) - r.log.Printlnf("%s Node Op ETH before bonuses: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare)) - r.log.Printlnf("%s Node Op ETH after bonuses: %s (%.3f)", r.logPrefix, totalEthForMinipools.String(), eth.WeiToEth(totalEthForMinipools)) - r.log.Printlnf("%s PDAO ETH: %s (%.3f)", r.logPrefix, pdaoEth.String(), eth.WeiToEth(pdaoEth)) - 
r.log.Printlnf("%s Voter Eth before distribution: %s (%.3f)", r.logPrefix, voterEth.String(), eth.WeiToEth(voterEth)) - r.log.Printlnf("%s Voter Eth after distribution: %s (%.3f)", r.logPrefix, trueVoterEth.String(), eth.WeiToEth(trueVoterEth)) + r.log.Printlnf("%s Smoothing Pool ETH: \t%s\t(%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) + r.log.Printlnf("%s Pool staker ETH: \t%s\t(%.3f)", r.logPrefix, truePoolStakerAmount.String(), eth.WeiToEth(truePoolStakerAmount)) + r.log.Printlnf("%s Node Op Eth: \t%s\t(%.3f)", r.logPrefix, trueNodeOperatorAmount.String(), eth.WeiToEth(trueNodeOperatorAmount)) + r.log.Printlnf("%s '--> minipool attestations:\t%s\t(%.3f)", r.logPrefix, totalEthForMinipools.String(), eth.WeiToEth(totalEthForMinipools)) + r.log.Printlnf("%s '----------------> bonuses:\t%s\t(%.3f)", r.logPrefix, totalEthForBonuses.String(), eth.WeiToEth(totalEthForBonuses)) + r.log.Printlnf("%s '--> megapool attestations:\t%s\t(%.3f)", r.logPrefix, totalEthForMegapools.String(), eth.WeiToEth(totalEthForMegapools)) + r.log.Printlnf("%s Voter Share: \t%s\t(%.3f)", r.logPrefix, trueVoterEth.String(), eth.WeiToEth(trueVoterEth)) + r.log.Printlnf("%s PDAO ETH: \t%s\t(%.3f)", r.logPrefix, pdaoEth.String(), eth.WeiToEth(pdaoEth)) + // Sum the actual values to determine how much eth is distributed + toBeDistributed := big.NewInt(0) + toBeDistributed.Add(toBeDistributed, truePoolStakerAmount) + toBeDistributed.Add(toBeDistributed, trueNodeOperatorAmount) + toBeDistributed.Add(toBeDistributed, trueVoterEth) + toBeDistributed.Add(toBeDistributed, pdaoEth) + r.log.Printlnf("%s TOTAL to be distributed: \t%s\t(%.3f)", r.logPrefix, toBeDistributed.String(), eth.WeiToEth(toBeDistributed)) r.log.Printlnf("%s (error = %s wei)", r.logPrefix, delta.String()) - r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String()) return &nodeRewards{ poolStakerEth: truePoolStakerAmount, - 
nodeOpEth: totalEthForMinipools, + nodeOpEth: trueNodeOperatorAmount, bonusScalar: bonusScalar, pdaoEth: pdaoEth, voterEth: trueVoterEth, @@ -1530,7 +1607,7 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { if nativeNodeDetails.MegapoolStakedRPL.Sign() > 0 { // The megapool's eligible staked RPL is defined by // min(1.5*RPL value of megapool bonded_eth, megapool staked rpl) - bondedEth := nativeNodeDetails.EthBonded + bondedEth := nativeNodeDetails.MegapoolEthBonded rplPrice := r.networkState.NetworkDetails.RplPrice // Price is eth per rpl, so to calculate the rpl value of the bonded eth, // we need to divide the bonded eth by the price. This nukes the 1eth unit, so @@ -1538,8 +1615,8 @@ func (r *treeGeneratorImpl_v11) getSmoothingPoolNodeDetails() error { bondedEthRplValue := big.NewInt(0).Mul(bondedEth, big.NewInt(15e17)) bondedEthRplValue.Div(bondedEthRplValue, rplPrice) // Now take the minimum of the node's actual rpl vs bondedEthRplValue - if nativeNodeDetails.RplStake.Cmp(bondedEthRplValue) < 0 { - nodeDetails.MegapoolVoteEligibleRpl = nativeNodeDetails.RplStake + if nativeNodeDetails.MegapoolStakedRPL.Cmp(bondedEthRplValue) < 0 { + nodeDetails.MegapoolVoteEligibleRpl = nativeNodeDetails.MegapoolStakedRPL } else { nodeDetails.MegapoolVoteEligibleRpl = bondedEthRplValue } diff --git a/shared/services/rewards/mock_test.go b/shared/services/rewards/mock_test.go index 54cc07f47..e3dda9f46 100644 --- a/shared/services/rewards/mock_test.go +++ b/shared/services/rewards/mock_test.go @@ -20,7 +20,7 @@ import ( ) func TestMockIntervalDefaultsTreegenv8v9(tt *testing.T) { - history := test.NewDefaultMockHistory() + history := test.NewDefaultMockHistory(false) state := history.GetEndNetworkState() t := newV8Test(tt, state.NetworkDetails.RewardIndex) diff --git a/shared/services/rewards/mock_v10_test.go b/shared/services/rewards/mock_v10_test.go index 2ae78e30e..c79fd4bbe 100644 --- a/shared/services/rewards/mock_v10_test.go +++ 
b/shared/services/rewards/mock_v10_test.go @@ -22,7 +22,7 @@ import ( func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { - history := test.NewDefaultMockHistory() + history := test.NewDefaultMockHistory(false) // Add a node which is earning some bonus commission node := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ SmoothingPool: true, diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index 3313b78bd..6d34126be 100644 --- a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -7,6 +7,7 @@ package rewards import ( "fmt" "math/big" + "strconv" "strings" "testing" @@ -22,7 +23,7 @@ import ( func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { - history := test.NewDefaultMockHistory() + history := test.NewDefaultMockHistory(true) // Add a node which is earning some bonus commission node := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ SmoothingPool: true, @@ -32,6 +33,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { node.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) history.Nodes = append(history.Nodes, node) state := history.GetEndNetworkState() + state.IsSaturnDeployed = true t := newV8Test(tt, state.NetworkDetails.RewardIndex) @@ -52,18 +54,21 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { for _, validator := range state.MinipoolValidatorDetails { t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) } + for _, validator := range state.MegapoolValidatorGlobalIndex { + t.bc.SetMinipoolPerformance(strconv.Itoa(int(validator.ValidatorInfo.ValidatorIndex)), make([]uint64, 0)) + } // Set some custom balances for the validators that opt in and out of smoothing pool nodeSummary := history.GetNodeSummary() - customBalanceNodes := nodeSummary["single_eight_eth_opted_in_quarter"] + customBalanceNodes := nodeSummary.MustGetClass(tt, "single_eight_eth_opted_in_quarter") for _, node := range 
customBalanceNodes { node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) } - customBalanceNodes = nodeSummary["single_eight_eth_opted_out_three_quarters"] + customBalanceNodes = nodeSummary.MustGetClass(tt, "single_eight_eth_opted_out_three_quarters") for _, node := range customBalanceNodes { node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) } - customBalanceNodes = nodeSummary["single_bond_reduction"] + customBalanceNodes = nodeSummary.MustGetClass(tt, "single_bond_reduction") for _, node := range customBalanceNodes { node.Minipools[0].SPWithdrawals = eth.EthToWei(0.5) } @@ -103,13 +108,13 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { rewardsFile := v11Artifacts.RewardsFile minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile - singleEightEthNodes := nodeSummary["single_eight_eth"] - singleSixteenEthNodes := nodeSummary["single_sixteen_eth"] + singleEightEthNodes := nodeSummary.MustGetClass(tt, "single_eight_eth") + singleSixteenEthNodes := nodeSummary.MustGetClass(tt, "single_sixteen_eth") for _, node := range append(singleEightEthNodes, singleSixteenEthNodes...) 
{ // Check the rewards amount in the rewards file rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) - expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + expectedRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) @@ -127,14 +132,20 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if oDaoRplAmount.Sign() != 0 { t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } } - singleEightEthNodesSP := nodeSummary["single_eight_eth_sp"] - singleSixteenEthNodesSP := nodeSummary["single_sixteen_eth_sp"] + singleEightEthNodesSP := nodeSummary.MustGetClass(tt, "single_eight_eth_sp") + singleSixteenEthNodesSP := nodeSummary.MustGetClass(tt, "single_sixteen_eth_sp") for _, node := range append(singleEightEthNodesSP, singleSixteenEthNodesSP...) 
{ // Check the rewards amount in the rewards file rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) - expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + expectedRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) @@ -145,7 +156,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { expectedEthAmount := big.NewInt(0) if node.SmoothingPoolRegistrationState { if node.Class == "single_eight_eth_sp" { - expectedEthAmount.SetString("1450562599049128367", 10) + expectedEthAmount.SetString("571616314199395770", 10) // There should be a bonus for these nodes' minipools if len(node.Minipools) != 1 { t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) @@ -168,7 +179,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { } } else { // 16-eth minipools earn more eth! A bit less than double. 
- expectedEthAmount.SetString("2200871632329635499", 10) + expectedEthAmount.SetString("839123867069486404", 10) if len(node.Minipools) != 1 { t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) } @@ -197,11 +208,17 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if oDaoRplAmount.Sign() != 0 { t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } } optingInNodesSP := append( - nodeSummary["single_eight_eth_opted_in_quarter"], - nodeSummary["single_sixteen_eth_opted_in_quarter"]..., + nodeSummary.MustGetClass(tt, "single_eight_eth_opted_in_quarter"), + nodeSummary.MustGetClass(tt, "single_sixteen_eth_opted_in_quarter")..., ) for _, node := range optingInNodesSP { // Check the rewards amount in the rewards file @@ -213,10 +230,10 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve expectedRewardsAmount := big.NewInt(0) if node.Class == "single_eight_eth_opted_in_quarter" { - expectedRewardsAmount.SetString("1784353229014464268647", 10) + expectedRewardsAmount.SetString("1595205464807146635862", 10) } else { // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 - expectedRewardsAmount.SetString("1310160289473732090952", 10) + expectedRewardsAmount.SetString("1171278656914890979185", 10) if perf.GetBonusEthEarned().Sign() != 0 { // 16 eth minipools should not get bonus commission t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) @@ -232,7 +249,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { expectedEthAmount := 
big.NewInt(0) if node.Class == "single_eight_eth_opted_in_quarter" { // About 3/4 what the full nodes got - expectedEthAmount.SetString("1091438193343898573", 10) + expectedEthAmount.SetString("430052870090634441", 10) // Earns 3/4 the bonus of a node that was in for the whole interval expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { @@ -240,7 +257,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { } } else { // 16-eth minipools earn more eth! A bit less than double. - expectedEthAmount.SetString("1656101426307448494", 10) + expectedEthAmount.SetString("631419939577039274", 10) } if ethAmount.Cmp(expectedEthAmount) != 0 { t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) @@ -251,11 +268,17 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if oDaoRplAmount.Sign() != 0 { t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } } optingOutNodesSP := append( - nodeSummary["single_eight_eth_opted_out_three_quarters"], - nodeSummary["single_sixteen_eth_opted_out_three_quarters"]..., + nodeSummary.MustGetClass(tt, "single_eight_eth_opted_out_three_quarters"), + nodeSummary.MustGetClass(tt, "single_sixteen_eth_opted_out_three_quarters")..., ) for _, node := range optingOutNodesSP { // Check the rewards amount in the rewards file @@ -267,10 +290,10 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve expectedRewardsAmount := big.NewInt(0) if 
node.Class == "single_eight_eth_opted_out_three_quarters" { - expectedRewardsAmount.SetString("1784353229014464268647", 10) + expectedRewardsAmount.SetString("1595205464807146635862", 10) } else { // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 - expectedRewardsAmount.SetString("1310160289473732090952", 10) + expectedRewardsAmount.SetString("1171278656914890979185", 10) } if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { @@ -282,7 +305,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { expectedEthAmount := big.NewInt(0) if node.Class == "single_eight_eth_opted_out_three_quarters" { // About 3/4 what the full nodes got - expectedEthAmount.SetString("1077373217115689381", 10) + expectedEthAmount.SetString("424690332326283987", 10) // Earns 3/4 the bonus of a node that was in for the whole interval expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { @@ -290,7 +313,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { } } else { // 16-eth minipools earn more eth! A bit less than double. 
- expectedEthAmount.SetString("1634310618066561014", 10) + expectedEthAmount.SetString("623111782477341389", 10) if perf.GetBonusEthEarned().Sign() != 0 { // 16 eth minipools should not get bonus commission t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) @@ -305,9 +328,15 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if oDaoRplAmount.Sign() != 0 { t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } } - bondReductionNode := nodeSummary["single_bond_reduction"] + bondReductionNode := nodeSummary.MustGetClass(tt, "single_bond_reduction") for _, node := range bondReductionNode { mp := node.Minipools[0] @@ -318,7 +347,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { // Nodes that bond reduce are treated as having their new bond for the full interval, // when it comes to RPL rewards. 
- expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + expectedRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) @@ -326,7 +355,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { // Make sure it got reduced ETH ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) - expectedEthAmount, _ := big.NewInt(0).SetString("1920903328050713153", 10) + expectedEthAmount, _ := big.NewInt(0).SetString("741661631419939577", 10) if ethAmount.Cmp(expectedEthAmount) != 0 { t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) } @@ -342,9 +371,15 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if oDaoRplAmount.Sign() != 0 { t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } } - noMinipoolsNodes := nodeSummary["no_minipools"] + noMinipoolsNodes := nodeSummary.MustGetClass(tt, "no_minipools") for _, node := range noMinipoolsNodes { // Check the rewards amount in the rewards file rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) @@ -363,8 +398,293 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if oDaoRplAmount.Sign() != 0 { t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := 
rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolNoRplNoSpNodes := nodeSummary.MustGetClass(tt, "megapool_no_rpl_no_sp") + for _, node := range megapoolNoRplNoSpNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it didn't get any ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolNoRplNoSpMinipoolsNodes := nodeSummary.MustGetClass(tt, "megapool_no_rpl_no_sp_minipools") + for _, node := range megapoolNoRplNoSpMinipoolsNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it didn't get any ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if 
ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolNoRplNoSpMinipoolsCollateralNodes := nodeSummary.MustGetClass(tt, "megapool_no_rpl_no_sp_minipools_collateral") + for _, node := range megapoolNoRplNoSpMinipoolsCollateralNodes { + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Logf("Node %+v", node) + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it didn't get any ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + 
t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolNoRplSpNodes := nodeSummary.MustGetClass(tt, "megapool_no_rpl_sp") + for _, node := range megapoolNoRplSpNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it got ETH + expectedEthAmount, _ := big.NewInt(0).SetString("772375377643504530", 10) + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolNoRplSpMinipoolsNodes := nodeSummary.MustGetClass(tt, "megapool_no_rpl_sp_minipools") + for _, node := range megapoolNoRplSpMinipoolsNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it got ETH + expectedEthAmount, _ := big.NewInt(0).SetString("3442314954682779452", 10) + ethAmount 
:= rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolNoRplSpMinipoolsCollateralNodes := nodeSummary.MustGetClass(tt, "megapool_no_rpl_sp_minipools_collateral") + for _, node := range megapoolNoRplSpMinipoolsCollateralNodes { + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + expectedEthAmount, _ := big.NewInt(0).SetString("3466242447129909356", 10) + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + 
} + + // Make sure it got 0 voter share ETH + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Sign() != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), "0") + } + } + + megapoolStakedRplNoSpNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_no_sp") + for _, node := range megapoolStakedRplNoSpNodes { + validatorCount := node.MegapoolValidators + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("91125852739110953396", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedRewardsAmount.Mul(expectedRewardsAmount, big.NewInt(int64(validatorCount))) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got no ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got voter share ETH + exepectedVoterShareEthAmount, _ := big.NewInt(0).SetString("933456067472306143", 10) + // Multiply by i+1 since the number of validators scales with i+1 + exepectedVoterShareEthAmount.Mul(exepectedVoterShareEthAmount, big.NewInt(int64(validatorCount))) + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Cmp(exepectedVoterShareEthAmount) != 0 { + t.Fatalf("Voter 
share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), exepectedVoterShareEthAmount.String()) + } } + megapoolStakedRplNoSpMinipoolsNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_no_sp_minipools") + for _, node := range megapoolStakedRplNoSpMinipoolsNodes { + validatorCount := node.MegapoolValidators + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("91125852739110953396", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedRewardsAmount.Mul(expectedRewardsAmount, big.NewInt(int64(validatorCount))) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got no ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got voter share ETH + exepectedVoterShareEthAmount, _ := big.NewInt(0).SetString("933456067472306143", 10) + // Multiply by i+1 since the number of validators scales with i+1 + exepectedVoterShareEthAmount.Mul(exepectedVoterShareEthAmount, big.NewInt(int64(validatorCount))) + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Cmp(exepectedVoterShareEthAmount) != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), 
exepectedVoterShareEthAmount.String()) + } + } + + megapoolStakedRplNoSpMinipoolsCollateralNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_no_sp_minipools_collateral") + for _, node := range megapoolStakedRplNoSpMinipoolsCollateralNodes { + validatorCount := node.MegapoolValidators + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("91125852739110953396", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedRewardsAmount.Mul(expectedRewardsAmount, big.NewInt(int64(validatorCount))) + // Add a constant amount for minipool rewards + minipoolRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) + expectedRewardsAmount.Add(expectedRewardsAmount, minipoolRewardsAmount) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got no ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got voter share ETH + exepectedVoterShareEthAmount, _ := big.NewInt(0).SetString("933456067472306143", 10) + // Multiply by i+1 since the number of validators scales with i+1 + exepectedVoterShareEthAmount.Mul(exepectedVoterShareEthAmount, big.NewInt(int64(validatorCount))) + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Cmp(exepectedVoterShareEthAmount) != 0 { + 
t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), exepectedVoterShareEthAmount.String()) + } + } + /* + + megapoolStakedRplSpNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp") + megapoolStakedRplSpMinipoolsNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp_minipools") + megapoolStakedRplSpMinipoolsCollateralNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp_minipools_collateral") + */ + // Validate merkle root v11MerkleRoot := v11Artifacts.RewardsFile.GetMerkleRoot() @@ -374,7 +694,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { // If this does not match, it implies either you updated the set of default mock nodes, // or you introduced a regression in treegen. // DO NOT update this value unless you know what you are doing. - expectedMerkleRoot := "0x6fc204154008bd9beefac7ded7009467eca0de9fa8e8b4f802d8f0fb7c56754a" + expectedMerkleRoot := "0x96bec9241c41bff45204aaadfe4bf28c5ced44328d0e2206720ceeecab3b4ede" if !strings.EqualFold(v11MerkleRoot, expectedMerkleRoot) { t.Fatalf("Merkle root does not match expected value %s != %s", v11MerkleRoot, expectedMerkleRoot) } else { @@ -407,6 +727,7 @@ func TestInsufficientEthForBonusesesV11(tt *testing.T) { // Ovewrite the SP balance to a value under the bonus commission history.NetworkDetails.SmoothingPoolBalance = big.NewInt(1000) state := history.GetEndNetworkState() + state.IsSaturnDeployed = true t := newV8Test(tt, state.NetworkDetails.RewardIndex) @@ -512,6 +833,7 @@ func TestMockNoRPLRewardsV11(tt *testing.T) { history.Nodes = append(history.Nodes, odaoNodes...) state := history.GetEndNetworkState() + state.IsSaturnDeployed = true t := newV8Test(tt, state.NetworkDetails.RewardIndex) @@ -640,6 +962,7 @@ func TestMockOptedOutAndThenBondReducedV11(tt *testing.T) { history.Nodes = append(history.Nodes, odaoNodes...) 
state := history.GetEndNetworkState() + state.IsSaturnDeployed = true t := newV8Test(tt, state.NetworkDetails.RewardIndex) @@ -756,6 +1079,7 @@ func TestMockWithdrawableEpochV11(tt *testing.T) { history.Nodes = append(history.Nodes, odaoNodes...) state := history.GetEndNetworkState() + state.IsSaturnDeployed = true t := newV8Test(tt, state.NetworkDetails.RewardIndex) diff --git a/shared/services/rewards/ssz_types/json.go b/shared/services/rewards/ssz_types/json.go index e515fe1b7..0d33ceb01 100644 --- a/shared/services/rewards/ssz_types/json.go +++ b/shared/services/rewards/ssz_types/json.go @@ -60,6 +60,50 @@ func (f *SSZFile_v1) MarshalJSON() ([]byte, error) { return json.Marshal(&alias) } +type sszfile_v2_alias SSZFile_v2 + +// This custom unmarshaler avoids creating a landmine where the user +// may forget to call NewSSZFile_v1 before unmarshaling into the result, +// which would cause the Magic header to be unset. +func (f *SSZFile_v2) UnmarshalJSON(data []byte) error { + // Disposable type without a custom unmarshal + var alias sszfile_v2_alias + err := json.Unmarshal(data, &alias) + if err != nil { + return err + } + *f = SSZFile_v2(alias) + + // After unmarshaling, set the magic header + f.Magic = Magicv2 + + // Verify legitimacy of the file + return f.Verify() +} + +// When writing JSON, we need to compute the merkle tree to populate the proofs +func (f *SSZFile_v2) MarshalJSON() ([]byte, error) { + if err := f.Verify(); err != nil { + return nil, fmt.Errorf("error verifying ssz while serializing json: %w", err) + } + proofs, err := f.Proofs() + if err != nil { + return nil, fmt.Errorf("error getting proofs: %w", err) + } + + for _, nr := range f.NodeRewards { + proof, ok := proofs[nr.Address] + if !ok { + return nil, fmt.Errorf("error getting proof for node %s", nr.Address) + } + nr.MerkleProof = proof + } + + var alias sszfile_v2_alias + alias = sszfile_v2_alias(*f) + return json.Marshal(&alias) +} + func (h *Hash) UnmarshalJSON(data []byte) error { var 
s string err := json.Unmarshal(data, &s) @@ -212,3 +256,48 @@ func (n NodeRewards) MarshalJSON() ([]byte, error) { // Serialize the map return json.Marshal(m) } + +func (n *NodeRewards_v2) UnmarshalJSON(data []byte) error { + var m map[string]json.RawMessage + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + + *n = make(NodeRewards_v2, 0, len(m)) + for k, v := range m { + s := strings.TrimPrefix(k, "0x") + addr, err := hex.DecodeString(s) + if err != nil { + return err + } + + if len(addr) != 20 { + return fmt.Errorf("address %s wrong size- must be 20 bytes", s) + } + + nodeReward := new(NodeReward_v2) + copy(nodeReward.Address[:], addr) + err = json.Unmarshal(v, nodeReward) + if err != nil { + return err + } + *n = append(*n, nodeReward) + } + + sort.Sort(*n) + return nil +} + +func (n NodeRewards_v2) MarshalJSON() ([]byte, error) { + // Node Rewards is a slice, but represented as a map in the json. + m := make(map[string]*NodeReward_v2, len(n)) + // Make sure we sort, first + sort.Sort(n) + for _, nr := range n { + m[nr.Address.String()] = nr + } + + // Serialize the map + return json.Marshal(m) +} diff --git a/shared/services/rewards/ssz_types/rewards-file-v5.go b/shared/services/rewards/ssz_types/rewards-file-v5.go index e394dc020..e5049ecda 100644 --- a/shared/services/rewards/ssz_types/rewards-file-v5.go +++ b/shared/services/rewards/ssz_types/rewards-file-v5.go @@ -18,6 +18,8 @@ import ( "github.com/wealdtech/go-merkletree/keccak256" ) +var Magicv2 [4]byte = [4]byte{0x52, 0x50, 0x52, 0x55} + type NodeRewards_v2 []*NodeReward_v2 type SSZFile_v2 struct { @@ -73,7 +75,7 @@ type SSZFile_v2 struct { func NewSSZFile_v2() *SSZFile_v2 { return &SSZFile_v2{ - Magic: Magic, + Magic: Magicv2, } } @@ -163,7 +165,7 @@ func (f *SSZFile_v2) FinalizeSSZ() ([]byte, error) { } func (f *SSZFile_v2) FinalizeSSZTo(buf []byte) ([]byte, error) { - copy(f.Magic[:], Magic[:]) + copy(f.Magic[:], Magicv2[:]) if err := f.Verify(); err != nil { return nil, err } @@ 
-173,7 +175,7 @@ func (f *SSZFile_v2) FinalizeSSZTo(buf []byte) ([]byte, error) { // Parsing wrapper that adds verification to the merkle root and magic header func ParseSSZFile_v2(buf []byte) (*SSZFile_v2, error) { - if !bytes.HasPrefix(buf, Magic[:]) { + if !bytes.HasPrefix(buf, Magicv2[:]) { return nil, errors.New("magic header not found in reward ssz file") } @@ -362,7 +364,7 @@ func (n NodeRewards_v2) Find(addr Address) *NodeReward_v2 { // Functions to implement IRewardsFile func (f *SSZFile_v2) Deserialize(data []byte) error { - if bytes.HasPrefix(data, Magic[:]) { + if bytes.HasPrefix(data, Magicv2[:]) { if err := f.UnmarshalSSZ(data); err != nil { return err } diff --git a/shared/services/rewards/test/beacon.go b/shared/services/rewards/test/beacon.go index 0de602df8..fdcfb3c81 100644 --- a/shared/services/rewards/test/beacon.go +++ b/shared/services/rewards/test/beacon.go @@ -118,6 +118,13 @@ func (m *MockBeaconClient) SetState(state *state.NetworkState) { } m.validatorPubkeys[validatorIndex(v.Index)] = v.Pubkey } + for _, v := range state.MegapoolValidatorGlobalIndex { + vIndex := strconv.FormatUint(v.ValidatorInfo.ValidatorIndex, 10) + if _, ok := m.validatorPubkeys[validatorIndex(vIndex)]; ok { + m.t.Fatalf("Validator %s already set", vIndex) + } + m.validatorPubkeys[validatorIndex(vIndex)] = types.ValidatorPubkey(v.Pubkey) + } } type mockBeaconCommitteeSlot struct { @@ -193,7 +200,10 @@ func (bc *MockBeaconClient) isValidatorActive(validator validatorIndex, e epoch) } validatorDetails, ok := bc.state.MinipoolValidatorDetails[validatorPubkey] if !ok { - return false, fmt.Errorf("validator %s not found", validatorPubkey) + validatorDetails, ok = bc.state.MegapoolValidatorDetails[validatorPubkey] + if !ok { + return false, fmt.Errorf("validator %s details not found", validatorPubkey) + } } // Validators are assigned duties in the epoch they are activated // but not in the epoch they exit diff --git a/shared/services/rewards/test/mock.go 
b/shared/services/rewards/test/mock.go index 606639f69..04996ef5c 100644 --- a/shared/services/rewards/test/mock.go +++ b/shared/services/rewards/test/mock.go @@ -1,11 +1,14 @@ package test import ( + "crypto/sha256" "math/big" "strconv" + "testing" "time" "github.com/ethereum/go-ethereum/common" + "github.com/rocket-pool/smartnode/bindings/megapool" rprewards "github.com/rocket-pool/smartnode/bindings/rewards" "github.com/rocket-pool/smartnode/bindings/types" "github.com/rocket-pool/smartnode/bindings/utils/eth" @@ -123,10 +126,22 @@ type MockNode struct { borrowedEth *big.Int Minipools []*MockMinipool + Megapool bool + MegapoolEthBorrowed *big.Int + MegapoolEthBonded *big.Int + MegapoolStakedRPL *big.Int + MegapoolValidators int + Notes string Class string } +func (n *MockNode) clearMinipoolWithdrawals() { + for _, minipool := range n.Minipools { + minipool.SPWithdrawals = nil + } +} + func (n *MockNode) AddMinipool(minipool *MockMinipool) { minipool.NodeAddress = n.Address n.bondedEth.Add(n.bondedEth, minipool.NodeDepositBalance) @@ -185,11 +200,29 @@ func (h *MockHistory) SetWithdrawals(mockBeaconClient *MockBeaconClient) { } } +func (n *MockNode) MegapoolAddress() common.Address { + // Just hash the node address with a string "megapool" + // use sha256 out of laziness + h := sha256.New() + h.Write([]byte("megapool")) + h.Write(n.Address.Bytes()) + return common.BytesToAddress(h.Sum(nil)) +} + +type MegapoolParams struct { + Validators int + EthBorrowed *big.Int + EthBonded *big.Int + StakedRPL *big.Int +} + type NewMockNodeParams struct { SmoothingPool bool EightEthMinipools int SixteenEthMinipools int CollateralRpl int64 + + Megapool *MegapoolParams } func (h *MockHistory) GetNewDefaultMockNode(params *NewMockNodeParams) *MockNode { @@ -209,6 +242,14 @@ func (h *MockHistory) GetNewDefaultMockNode(params *NewMockNodeParams) *MockNode bondedEth: big.NewInt(0), } + if params.Megapool != nil { + out.Megapool = true + out.MegapoolEthBorrowed = 
params.Megapool.EthBorrowed + out.MegapoolEthBonded = params.Megapool.EthBonded + out.MegapoolStakedRPL = params.Megapool.StakedRPL + out.MegapoolValidators = params.Megapool.Validators + } + for i := 0; i < params.EightEthMinipools; i++ { out.AddMinipool(h.GetNewDefaultMockMinipool(BondSizeEightEth)) } @@ -223,11 +264,151 @@ func (h *MockHistory) GetNewDefaultMockNode(params *NewMockNodeParams) *MockNode // Opt nodes in an epoch before the start of the interval if params.SmoothingPool { out.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch - 1)) + } else { + out.clearMinipoolWithdrawals() } return out } +// Returns a list of nodes with megapools +// Added for v11 +func (h *MockHistory) GetDefaultMockMegapoolNodes() []*MockNode { + nodes := []*MockNode{} + + for minipools := range 2 { + makeMinipools := minipools != 0 + // Create a few nodes with megapools + // and no staked rpl, but members of the smoothing pool + for i := range 2 { + params := &NewMockNodeParams{ + Megapool: &MegapoolParams{ + Validators: 3, + EthBorrowed: big.NewInt(0).Mul(oneEth, big.NewInt(24*3)), + EthBonded: big.NewInt(0).Mul(oneEth, big.NewInt(4*3)), + }, + SmoothingPool: true, + } + if makeMinipools { + params.EightEthMinipools = 2 + params.SixteenEthMinipools = 2 + if i == 1 { + params.CollateralRpl = 10 + } + } + node := h.GetNewDefaultMockNode(params) + node.Notes = "Opted in node with a no-rpl megapool" + node.Class = "megapool_no_rpl_sp" + if makeMinipools { + node.Notes += " and minipools" + node.Class += "_minipools" + if i == 1 { + node.Notes += " and collateral" + node.Class += "_collateral" + } + } + nodes = append(nodes, node) + } + + for i := range int64(2) { + params := &NewMockNodeParams{ + Megapool: &MegapoolParams{ + Validators: int(i + 1), + EthBorrowed: big.NewInt(0).Mul(oneEth, big.NewInt(24*(i+1))), + EthBonded: big.NewInt(0).Mul(oneEth, big.NewInt(4*(i+1))), + }, + } + if makeMinipools { + 
params.EightEthMinipools = 2 + params.SixteenEthMinipools = 2 + if i == 1 { + params.CollateralRpl = 10 + } + } + node := h.GetNewDefaultMockNode(params) + node.Notes = "Opted out node with a no-rpl megapool" + node.Class = "megapool_no_rpl_no_sp" + if makeMinipools { + node.Notes += " and minipools" + node.Class += "_minipools" + if i == 1 { + node.Notes += " and collateral" + node.Class += "_collateral" + } + } + nodes = append(nodes, node) + } + + // Create a few nodes with megapools + // and staked rpl, but not members of the smoothing pool + // so they earn voter share + for i := range int64(2) { + params := &NewMockNodeParams{ + Megapool: &MegapoolParams{ + Validators: int(i + 1), + EthBorrowed: big.NewInt(0).Mul(oneEth, big.NewInt(24*(i+1))), + EthBonded: big.NewInt(0).Mul(oneEth, big.NewInt(4*(i+1))), + StakedRPL: big.NewInt(0).Mul(oneEth, big.NewInt(int64(i+1))), + }, + } + if makeMinipools { + params.EightEthMinipools = 2 + params.SixteenEthMinipools = 2 + if i == 1 { + params.CollateralRpl = 10 + } + } + node := h.GetNewDefaultMockNode(params) + node.Notes = "Opted out node with a megapool and staked RPL" + node.Class = "megapool_staked_rpl_no_sp" + if makeMinipools { + node.Notes += " and minipools" + node.Class += "_minipools" + if i == 1 { + node.Notes += " and collateral" + node.Class += "_collateral" + } + } + nodes = append(nodes, node) + } + + // Create a few nodes with megapools + // and staked rpl, and members of the smoothing pool + for i := range int64(2) { + params := &NewMockNodeParams{ + Megapool: &MegapoolParams{ + Validators: int(i + 1), + EthBorrowed: big.NewInt(0).Mul(oneEth, big.NewInt(24*(i+1))), + EthBonded: big.NewInt(0).Mul(oneEth, big.NewInt(4*(i+1))), + StakedRPL: big.NewInt(0).Mul(oneEth, big.NewInt(int64(i+1))), + }, + SmoothingPool: true, + } + if makeMinipools { + params.EightEthMinipools = 2 + params.SixteenEthMinipools = 2 + if i == 1 { + params.CollateralRpl = 10 + } + } + node := h.GetNewDefaultMockNode(params) + 
node.Notes = "Opted in node with a megapool and staked RPL" + node.Class = "megapool_staked_rpl_sp" + if makeMinipools { + node.Notes += " and minipools" + node.Class += "_minipools" + if i == 1 { + node.Notes += " and collateral" + node.Class += "_collateral" + } + } + nodes = append(nodes, node) + } + } + + return nodes +} + // Returns a list of nodes with various attributes- // some will have active minipools, some will not. // some will be under and over collateralized. @@ -244,7 +425,7 @@ func (h *MockHistory) GetDefaultMockNodes() []*MockNode { }) node.Notes = "Regular node with one regular 8-eth minipool" node.Class = "single_eight_eth" - node.Minipools[0].SPWithdrawals = nil + node.clearMinipoolWithdrawals() node.Minipools[0].OptedOutWithdrawals = big.NewInt(1e18) nodes = append(nodes, node) } @@ -269,7 +450,7 @@ func (h *MockHistory) GetDefaultMockNodes() []*MockNode { }) node.Notes = "Regular node with one regular 16-eth minipool" node.Class = "single_sixteen_eth" - node.Minipools[0].SPWithdrawals = nil + node.clearMinipoolWithdrawals() node.Minipools[0].OptedOutWithdrawals = big.NewInt(1e18) nodes = append(nodes, node) } @@ -358,7 +539,7 @@ func (h *MockHistory) GetDefaultMockNodes() []*MockNode { CollateralRpl: 10, }) node.Minipools[0].Status = types.Prelaunch - node.Minipools[0].SPWithdrawals = nil + node.clearMinipoolWithdrawals() node.Notes = "Node with one 8-eth minipool that is pending" node.Class = "single_eight_eth_pending" nodes = append(nodes, node) @@ -369,7 +550,7 @@ func (h *MockHistory) GetDefaultMockNodes() []*MockNode { CollateralRpl: 10, }) node.Minipools[0].Finalised = true - node.Minipools[0].SPWithdrawals = nil + node.clearMinipoolWithdrawals() node.Notes = "Node with one 8-eth minipool that is finalized" node.Class = "single_eight_eth_finalized" nodes = append(nodes, node) @@ -451,6 +632,22 @@ func NewDefaultMockHistoryNoNodes() *MockHistory { // Put 100 ether in the smoothing pool SmoothingPoolBalance: 
big.NewInt(0).Mul(big.NewInt(100), big.NewInt(1000000000000000000)), + // Saturn + MegapoolRevenueSplitSettings: rpstate.MegapoolRevenueSplitSettings{ + // These numbers are nonsensical except NodeOperatorCommissionAddr + // this ensures the time-weighted averages are the onces referenced. + NodeOperatorCommissionShare: big.NewInt(0).Mul(oneEth, big.NewInt(2)), + NodeOperatorCommissionAdder: big.NewInt(1e16), + VoterCommissionShare: big.NewInt(0).Mul(oneEth, big.NewInt(2)), + PdaoCommissionShare: big.NewInt(0).Mul(oneEth, big.NewInt(2)), + }, + MegapoolRevenueSplitTimeWeightedAverages: rpstate.MegapoolRevenueSplitTimeWeightedAverages{ + NodeShare: big.NewInt(4e16), + VoterShare: big.NewInt(6e16), + PdaoShare: big.NewInt(5e16), + }, + SmoothingPoolPendingVoterShare: big.NewInt(0).Mul(big.NewInt(10), oneEth), + // The rest of the fields seem unimportant and are left empty }, lastNodeAddress: common.BigToAddress(big.NewInt(2000)), @@ -461,9 +658,12 @@ func NewDefaultMockHistoryNoNodes() *MockHistory { return out } -func NewDefaultMockHistory() *MockHistory { +func NewDefaultMockHistory(megapools bool) *MockHistory { out := NewDefaultMockHistoryNoNodes() out.Nodes = out.GetDefaultMockNodes() + if megapools { + out.Nodes = append(out.Nodes, out.GetDefaultMockMegapoolNodes()...) 
+ } return out } @@ -483,6 +683,12 @@ func (h *MockHistory) GetEndNetworkState() *state.NetworkState { MinipoolValidatorDetails: make(state.ValidatorDetailsMap), OracleDaoMemberDetails: []rpstate.OracleDaoMemberDetails{}, ProtocolDaoProposalDetails: nil, + + MegapoolValidatorGlobalIndex: []megapool.ValidatorInfoFromGlobalIndex{}, + MegapoolToPubkeysMap: make(map[common.Address][]types.ValidatorPubkey), + MegapoolValidatorInfo: make(map[types.ValidatorPubkey]*megapool.ValidatorInfoFromGlobalIndex), + MegapoolDetails: make(map[common.Address]rpstate.NativeMegapoolDetails), + MegapoolValidatorDetails: make(state.ValidatorDetailsMap), } // Add nodes @@ -541,6 +747,24 @@ func (h *MockHistory) GetEndNetworkState() *state.NetworkState { // Ratio of bonded to bonded plus borrowed CollateralisationRatio: collateralisationRatio, + + MegapoolAddress: node.MegapoolAddress(), + MegapoolDeployed: node.Megapool, + + MegapoolETHBorrowed: big.NewInt(0), + MegapoolEthBonded: big.NewInt(0), + MegapoolStakedRPL: big.NewInt(0), + LegacyStakedRPL: node.RplStake, + } + + if node.MegapoolEthBorrowed != nil { + details.MegapoolETHBorrowed = node.MegapoolEthBorrowed + } + if node.MegapoolEthBonded != nil { + details.MegapoolEthBonded = node.MegapoolEthBonded + } + if node.MegapoolStakedRPL != nil { + details.MegapoolStakedRPL = node.MegapoolStakedRPL } out.NodeDetails = append(out.NodeDetails, details) @@ -630,6 +854,48 @@ func (h *MockHistory) GetEndNetworkState() *state.NetworkState { } out.OracleDaoMemberDetails = append(out.OracleDaoMemberDetails, details) } + + // Add megapool details + if node.Megapool { + out.MegapoolDetails[node.MegapoolAddress()] = rpstate.NativeMegapoolDetails{ + ActiveValidatorCount: uint32(node.MegapoolValidators), + UserCapital: big.NewInt(0).Set(node.MegapoolEthBorrowed), + NodeBond: big.NewInt(0).Set(node.MegapoolEthBonded), + } + for i := 0; i < node.MegapoolValidators; i++ { + pubkey := h.GetValidatorPubkey() + idx := h.GetValidatorIndex() + intIdx, err := 
strconv.ParseUint(idx, 10, 64) + if err != nil { + panic(err) + } + vifgi := megapool.ValidatorInfoFromGlobalIndex{ + Pubkey: pubkey.Bytes(), + ValidatorInfo: megapool.ValidatorInfo{ + ValidatorIndex: intIdx, + Staked: true, + }, + MegapoolAddress: node.MegapoolAddress(), + ValidatorId: uint32(intIdx), + } + out.MegapoolValidatorGlobalIndex = append(out.MegapoolValidatorGlobalIndex, vifgi) + out.MegapoolToPubkeysMap[node.MegapoolAddress()] = append(out.MegapoolToPubkeysMap[node.MegapoolAddress()], pubkey) + out.MegapoolValidatorInfo[pubkey] = &vifgi + out.MegapoolValidatorDetails[pubkey] = beacon.ValidatorStatus{ + Pubkey: pubkey, + Index: idx, + WithdrawalCredentials: common.Hash{}, + Balance: (*big.Int)(_bondSizeThirtyTwoEth).Uint64(), + EffectiveBalance: (*big.Int)(_bondSizeThirtyTwoEth).Uint64(), + Slashed: false, + ActivationEligibilityEpoch: 0, + ActivationEpoch: 0, + ExitEpoch: FarFutureEpoch, + WithdrawableEpoch: FarFutureEpoch, + Exists: true, + } + } + } } return out @@ -681,10 +947,25 @@ func (h *MockHistory) GetPreviousRewardSnapshotEvent() rprewards.RewardsEvent { } } -func (h *MockHistory) GetNodeSummary() map[string][]*MockNode { +type nodeSummary map[string][]*MockNode + +func (h *MockHistory) GetNodeSummary() nodeSummary { out := make(map[string][]*MockNode) for _, node := range h.Nodes { out[node.Class] = append(out[node.Class], node) } return out } + +func (s nodeSummary) GetClass(class string) ([]*MockNode, bool) { + nodes, ok := s[class] + return nodes, ok +} + +func (s nodeSummary) MustGetClass(t *testing.T, class string) []*MockNode { + nodes, ok := s.GetClass(class) + if !ok { + t.Fatalf("Class %s not found", class) + } + return nodes +} diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 8e9d8c9cf..3030d9d8f 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -224,6 +224,9 @@ type MegapoolValidatorInfo struct { AttestationCount int `json:"attestationCount"` 
NativeValidatorInfo *megapool.ValidatorInfoFromGlobalIndex `json:"nativeValidatorInfo"` + + // Amount of eth earned by this validator in the smoothing pool + MegapoolValidatorShare *big.Int `json:"megapoolValidatorShare"` } type MegapoolInfo struct { diff --git a/shared/services/state/network-state.go b/shared/services/state/network-state.go index d95e9a2ff..b89dde1d4 100644 --- a/shared/services/state/network-state.go +++ b/shared/services/state/network-state.go @@ -572,14 +572,21 @@ func (s *NetworkState) CalculateNodeWeights() (map[common.Address]*big.Int, *big eligibleBorrowedEth := s.GetEligibleBorrowedEth(&node) rplStake := s.GetRplStake(&node) - // minCollateral := borrowedEth * minCollateralFraction / ratio - // NOTE: minCollateralFraction and ratio are both percentages, but multiplying and dividing by them cancels out the need for normalization by eth.EthToWei(1) - minCollateral := big.NewInt(0).Mul(eligibleBorrowedEth, s.NetworkDetails.MinCollateralFraction) - minCollateral.Div(minCollateral, s.NetworkDetails.RplPrice) + minCollateral := big.NewInt(0) + if !s.IsSaturnDeployed { + // minCollateral := borrowedEth * minCollateralFraction / ratio + // NOTE: minCollateralFraction and ratio are both percentages, but multiplying and dividing by them cancels out the need for normalization by eth.EthToWei(1) + minCollateral = minCollateral.Mul(eligibleBorrowedEth, s.NetworkDetails.MinCollateralFraction) + minCollateral.Div(minCollateral, s.NetworkDetails.RplPrice) + } // Calculate the weight nodeWeight := big.NewInt(0) - if rplStake.Cmp(minCollateral) == -1 || eligibleBorrowedEth.Sign() <= 0 { + if eligibleBorrowedEth.Sign() <= 0 { + weightSlice[i] = nodeWeight + return nil + } + if rplStake.Cmp(minCollateral) == -1 && !s.IsSaturnDeployed { weightSlice[i] = nodeWeight return nil } From 77f89a348ecf306a9feeab3fe3efa2925da2f686 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 5 Sep 2025 17:53:14 -0400 Subject: [PATCH 23/33] Test remaining megapool classes --- 
shared/services/rewards/mock_v11_test.go | 115 ++++++++++++++++++++++- 1 file changed, 110 insertions(+), 5 deletions(-) diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index 6d34126be..0440bcfb0 100644 --- a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -678,13 +678,118 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), exepectedVoterShareEthAmount.String()) } } - /* - megapoolStakedRplSpNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp") - megapoolStakedRplSpMinipoolsNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp_minipools") - megapoolStakedRplSpMinipoolsCollateralNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp_minipools_collateral") - */ + megapoolStakedRplSpNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp") + for _, node := range megapoolStakedRplSpNodes { + validatorCount := node.MegapoolValidators + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("91125852739110953396", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedRewardsAmount.Mul(expectedRewardsAmount, big.NewInt(int64(validatorCount))) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + expectedEthAmount, _ := big.NewInt(0).SetString("257458459214501510", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedEthAmount.Mul(expectedEthAmount, big.NewInt(int64(validatorCount))) + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if 
ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got voter share ETH + exepectedVoterShareEthAmount, _ := big.NewInt(0).SetString("933456067472306143", 10) + // Multiply by i+1 since the number of validators scales with i+1 + exepectedVoterShareEthAmount.Mul(exepectedVoterShareEthAmount, big.NewInt(int64(validatorCount))) + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Cmp(exepectedVoterShareEthAmount) != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), exepectedVoterShareEthAmount.String()) + } + } + megapoolStakedRplSpMinipoolsNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp_minipools") + for _, node := range megapoolStakedRplSpMinipoolsNodes { + validatorCount := node.MegapoolValidators + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("91125852739110953396", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedRewardsAmount.Mul(expectedRewardsAmount, big.NewInt(int64(validatorCount))) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + expectedEthAmount, _ := big.NewInt(0).SetString("2927398036253776432", 10) + // Multiply by i+1 since the number of validators scales with i+1 + 
expectedEthAmount.Mul(expectedEthAmount, big.NewInt(int64(validatorCount))) + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got voter share ETH + exepectedVoterShareEthAmount, _ := big.NewInt(0).SetString("933456067472306143", 10) + // Multiply by i+1 since the number of validators scales with i+1 + exepectedVoterShareEthAmount.Mul(exepectedVoterShareEthAmount, big.NewInt(int64(validatorCount))) + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Cmp(exepectedVoterShareEthAmount) != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), exepectedVoterShareEthAmount.String()) + } + } + + megapoolStakedRplSpMinipoolsCollateralNodes := nodeSummary.MustGetClass(tt, "megapool_staked_rpl_sp_minipools_collateral") + for _, node := range megapoolStakedRplSpMinipoolsCollateralNodes { + validatorCount := node.MegapoolValidators + // Check the rewards amount in the rewards file + expectedRewardsAmount, _ := big.NewInt(0).SetString("91125852739110953396", 10) + // Multiply by i+1 since the number of validators scales with i+1 + expectedRewardsAmount.Mul(expectedRewardsAmount, big.NewInt(int64(validatorCount))) + // Add a constant amount for minipool rewards + minipoolRewardsAmount, _ := big.NewInt(0).SetString("911258527391109533960", 10) + expectedRewardsAmount.Add(expectedRewardsAmount, minipoolRewardsAmount) + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if 
rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + expectedEthAmount, _ := big.NewInt(0).SetString("3213270392749244710", 10) + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + + // Make sure it got voter share ETH + exepectedVoterShareEthAmount, _ := big.NewInt(0).SetString("933456067472306143", 10) + // Multiply by i+1 since the number of validators scales with i+1 + exepectedVoterShareEthAmount.Mul(exepectedVoterShareEthAmount, big.NewInt(int64(validatorCount))) + voterShareEthAmount := rewardsFile.GetNodeVoterShareEth(node.Address) + if voterShareEthAmount.Cmp(exepectedVoterShareEthAmount) != 0 { + t.Fatalf("Voter share ETH amount does not match expected value for node %s: %s != %s", node.Notes, voterShareEthAmount.String(), exepectedVoterShareEthAmount.String()) + } + } // Validate merkle root v11MerkleRoot := v11Artifacts.RewardsFile.GetMerkleRoot() From b74ac5a491dc4af24360aa196c25688844beba20 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 5 Sep 2025 18:31:05 -0400 Subject: [PATCH 24/33] Add performance file type which includes megapool performance --- shared/services/rewards/files.go | 6 +- shared/services/rewards/files_test.go | 4 +- shared/services/rewards/generator-impl-v11.go | 45 +++---- shared/services/rewards/generator-impl-v8.go | 4 +- .../services/rewards/generator-impl-v9-v10.go | 4 +- 
shared/services/rewards/generator-v8_test.go | 6 +- shared/services/rewards/generator.go | 2 +- shared/services/rewards/mock_v10_test.go | 28 ++--- shared/services/rewards/mock_v11_test.go | 30 ++--- .../services/rewards/performance-file-v1.go | 110 ++++++++++++++++++ shared/services/rewards/rewards-file-v1.go | 61 ++++++---- shared/services/rewards/rewards-file-v2.go | 66 +++++++---- shared/services/rewards/types.go | 18 ++- shared/services/rewards/utils.go | 2 +- 14 files changed, 274 insertions(+), 112 deletions(-) create mode 100644 shared/services/rewards/performance-file-v1.go diff --git a/shared/services/rewards/files.go b/shared/services/rewards/files.go index e3e02d74d..6b412136b 100644 --- a/shared/services/rewards/files.go +++ b/shared/services/rewards/files.go @@ -39,7 +39,7 @@ func ReadLocalMinipoolPerformanceFile(path string) (*LocalMinipoolPerformanceFil return nil, fmt.Errorf("error unmarshaling rewards file from %s: %w", path, err) } - return NewLocalFile[IMinipoolPerformanceFile](minipoolPerformance, path), nil + return NewLocalFile[IPerformanceFile](minipoolPerformance, path), nil } // Interface for local rewards or minipool performance files @@ -67,7 +67,7 @@ type ILocalFile interface { // Type aliases type LocalRewardsFile = LocalFile[IRewardsFile] -type LocalMinipoolPerformanceFile = LocalFile[IMinipoolPerformanceFile] +type LocalMinipoolPerformanceFile = LocalFile[IPerformanceFile] // NewLocalFile creates the wrapper, but doesn't write to disk. // This should be used when generating new trees / performance files. @@ -198,7 +198,7 @@ func saveArtifactsImpl(smartnode *config.SmartnodeConfig, treeResult *GenerateTr files := []ILocalFile{ // Do not reorder! 
// i == 0 - minipool performance file - NewLocalFile[IMinipoolPerformanceFile]( + NewLocalFile[IPerformanceFile]( treeResult.MinipoolPerformanceFile, smartnode.GetMinipoolPerformancePath(currentIndex, true), ), diff --git a/shared/services/rewards/files_test.go b/shared/services/rewards/files_test.go index 7139099d9..b49c123ea 100644 --- a/shared/services/rewards/files_test.go +++ b/shared/services/rewards/files_test.go @@ -43,7 +43,7 @@ func TestFilesFromTree(t *testing.T) { } minipoolPerformanceFile := &f.MinipoolPerformanceFile - localMinipoolPerformanceFile := NewLocalFile[IMinipoolPerformanceFile]( + localMinipoolPerformanceFile := NewLocalFile[IPerformanceFile]( minipoolPerformanceFile, path.Join(dir, "performance.json"), ) @@ -102,7 +102,7 @@ func TestCompressionAndCids(t *testing.T) { ) minipoolPerformanceFile := &f.MinipoolPerformanceFile - localMinipoolPerformanceFile := NewLocalFile[IMinipoolPerformanceFile]( + localMinipoolPerformanceFile := NewLocalFile[IPerformanceFile]( minipoolPerformanceFile, path.Join(dir, "performance.json"), ) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 8b2895699..135cf3032 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -57,7 +57,7 @@ type treeGeneratorImpl_v11 struct { successfulAttestations uint64 genesisTime time.Time invalidNetworkNodes map[common.Address]uint64 - minipoolPerformanceFile *MinipoolPerformanceFile_v2 + performanceFile *PerformanceFile_v1 nodeRewards map[common.Address]*ssz_types.NodeReward_v2 networkRewards map[ssz_types.Layer]*ssz_types.NetworkReward @@ -99,9 +99,10 @@ func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint totalPdaoScore: big.NewInt(0), networkState: state, invalidNetworkNodes: map[common.Address]uint64{}, - minipoolPerformanceFile: &MinipoolPerformanceFile_v2{ + performanceFile: &PerformanceFile_v1{ Index: index, - 
MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, + MinipoolPerformance: map[common.Address]*MinipoolPerformance_v2{}, + MegapoolPerformance: map[common.Address]*MegapoolPerformance_v1{}, }, nodeRewards: map[common.Address]*ssz_types.NodeReward_v2{}, networkRewards: map[ssz_types.Layer]*ssz_types.NetworkReward{}, @@ -128,9 +129,9 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN // Set the network name r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName) - r.minipoolPerformanceFile.Network = networkName - r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion - r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + r.performanceFile.Network = networkName + r.performanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.performanceFile.RulesetVersion = r.rewardsFile.RulesetVersion // Get the Beacon config r.beaconConfig = r.networkState.BeaconConfig @@ -192,7 +193,7 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN } // Sort all of the missed attestations so the files are always generated in the same state - for _, minipoolInfo := range r.minipoolPerformanceFile.MinipoolPerformance { + for _, minipoolInfo := range r.performanceFile.MinipoolPerformance { sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool { return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j] }) @@ -201,7 +202,7 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN return &GenerateTreeResult{ RewardsFile: r.rewardsFile, InvalidNetworkNodes: r.invalidNetworkNodes, - MinipoolPerformanceFile: r.minipoolPerformanceFile, + MinipoolPerformanceFile: r.performanceFile, }, nil } @@ -219,9 +220,9 @@ func (r *treeGeneratorImpl_v11) approximateStakerShareOfSmoothingPool(rp Rewards // Set the network name r.rewardsFile.Network, _ = 
ssz_types.NetworkFromString(networkName) - r.minipoolPerformanceFile.Network = networkName - r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion - r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + r.performanceFile.Network = networkName + r.performanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.performanceFile.RulesetVersion = r.rewardsFile.RulesetVersion // Get the Beacon config r.beaconConfig = r.networkState.BeaconConfig @@ -617,7 +618,7 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) return err } if r.rewardsFile.RulesetVersion >= 10 { - r.minipoolPerformanceFile.BonusScalar = QuotedBigIntFromBigInt(nodeRewards.bonusScalar) + r.performanceFile.BonusScalar = QuotedBigIntFromBigInt(nodeRewards.bonusScalar) } // Update the rewards maps @@ -679,7 +680,7 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) for _, minipoolInfo := range nodeInfo.Minipools { successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) - performance := &SmoothingPoolMinipoolPerformance_v2{ + performance := &MinipoolPerformance_v2{ Pubkey: minipoolInfo.ValidatorPubkey.Hex(), SuccessfulAttestations: successfulAttestations, MissedAttestations: missingAttestations, @@ -697,7 +698,7 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) for slot := range minipoolInfo.MissingAttestationSlots { performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) } - r.minipoolPerformanceFile.MinipoolPerformance[minipoolInfo.Address] = performance + r.performanceFile.MinipoolPerformance[minipoolInfo.Address] = performance } // Add the rewards to the running total for the specified network @@ -1683,19 +1684,19 @@ func (r *treeGeneratorImpl_v11) getBlocksAndTimesForInterval(previousIntervalEve endTime := 
r.beaconConfig.GetSlotTime(r.snapshotEnd.Slot) r.rewardsFile.StartTime = startTime - r.minipoolPerformanceFile.StartTime = startTime + r.performanceFile.StartTime = startTime r.rewardsFile.EndTime = endTime - r.minipoolPerformanceFile.EndTime = endTime + r.performanceFile.EndTime = endTime r.rewardsFile.ConsensusStartBlock = nextEpoch * r.beaconConfig.SlotsPerEpoch - r.minipoolPerformanceFile.ConsensusStartBlock = r.rewardsFile.ConsensusStartBlock + r.performanceFile.ConsensusStartBlock = r.rewardsFile.ConsensusStartBlock r.rewardsFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock - r.minipoolPerformanceFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + r.performanceFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock r.rewardsFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock - r.minipoolPerformanceFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + r.performanceFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock // Get the first block that isn't missing var elBlockNumber uint64 @@ -1706,7 +1707,7 @@ func (r *treeGeneratorImpl_v11) getBlocksAndTimesForInterval(previousIntervalEve } if !exists { r.rewardsFile.ConsensusStartBlock++ - r.minipoolPerformanceFile.ConsensusStartBlock++ + r.performanceFile.ConsensusStartBlock++ } else { elBlockNumber = beaconBlock.ExecutionBlockNumber break @@ -1717,7 +1718,7 @@ func (r *treeGeneratorImpl_v11) getBlocksAndTimesForInterval(previousIntervalEve if elBlockNumber == 0 { // We are pre-merge, so get the first block after the one from the previous interval r.rewardsFile.ExecutionStartBlock = previousIntervalEvent.ExecutionBlock.Uint64() + 1 - r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + r.performanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock))) if err != nil { return nil, fmt.Errorf("error getting EL start block %d: %w", 
r.rewardsFile.ExecutionStartBlock, err) @@ -1725,7 +1726,7 @@ func (r *treeGeneratorImpl_v11) getBlocksAndTimesForInterval(previousIntervalEve } else { // We are post-merge, so get the EL block corresponding to the BC block r.rewardsFile.ExecutionStartBlock = elBlockNumber - r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + r.performanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) if err != nil { return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) diff --git a/shared/services/rewards/generator-impl-v8.go b/shared/services/rewards/generator-impl-v8.go index 0b11beecf..6298202b7 100644 --- a/shared/services/rewards/generator-impl-v8.go +++ b/shared/services/rewards/generator-impl-v8.go @@ -83,7 +83,7 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 EndTime: endTime.UTC(), ConsensusEndBlock: consensusBlock, ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, + MinipoolPerformance: map[common.Address]*MinipoolPerformance_v2{}, }, }, validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, @@ -634,7 +634,7 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool) for _, minipoolInfo := range nodeInfo.Minipools { successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) - performance := &SmoothingPoolMinipoolPerformance_v2{ + performance := &MinipoolPerformance_v2{ Pubkey: minipoolInfo.ValidatorPubkey.Hex(), SuccessfulAttestations: successfulAttestations, MissedAttestations: missingAttestations, diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go index aee400721..d583639e7 100644 --- 
a/shared/services/rewards/generator-impl-v9-v10.go +++ b/shared/services/rewards/generator-impl-v9-v10.go @@ -93,7 +93,7 @@ func newTreeGeneratorImpl_v9_v10(rulesetVersion uint64, log *log.ColorLogger, lo invalidNetworkNodes: map[common.Address]uint64{}, minipoolPerformanceFile: &MinipoolPerformanceFile_v2{ Index: index, - MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, + MinipoolPerformance: map[common.Address]*MinipoolPerformance_v2{}, }, nodeRewards: map[common.Address]*ssz_types.NodeReward{}, networkRewards: map[ssz_types.Layer]*ssz_types.NetworkReward{}, @@ -567,7 +567,7 @@ func (r *treeGeneratorImpl_v9_v10) calculateEthRewards(checkBeaconPerformance bo for _, minipoolInfo := range nodeInfo.Minipools { successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) - performance := &SmoothingPoolMinipoolPerformance_v2{ + performance := &MinipoolPerformance_v2{ Pubkey: minipoolInfo.ValidatorPubkey.Hex(), SuccessfulAttestations: successfulAttestations, MissedAttestations: missingAttestations, diff --git a/shared/services/rewards/generator-v8_test.go b/shared/services/rewards/generator-v8_test.go index 861752515..83189a55d 100644 --- a/shared/services/rewards/generator-v8_test.go +++ b/shared/services/rewards/generator-v8_test.go @@ -30,7 +30,7 @@ func (t *v8Test) saveArtifacts(prefix string, result *GenerateTreeResult) { fullPath: filepath.Join(tmpDir, fmt.Sprintf("%s-rewards.json", prefix)), f: result.RewardsFile, } - performanceLocalFile := LocalFile[IMinipoolPerformanceFile]{ + performanceLocalFile := LocalFile[IPerformanceFile]{ fullPath: filepath.Join(tmpDir, fmt.Sprintf("%s-minipool-performance.json", prefix)), f: result.MinipoolPerformanceFile, } @@ -58,12 +58,12 @@ func (t *v8Test) failIf(err error) { } } -func (t *v8Test) SetMinipoolPerformance(canonicalMinipoolPerformance IMinipoolPerformanceFile, networkState *state.NetworkState) { +func 
(t *v8Test) SetMinipoolPerformance(canonicalMinipoolPerformance IPerformanceFile, networkState *state.NetworkState) { addresses := canonicalMinipoolPerformance.GetMinipoolAddresses() for _, address := range addresses { // Get the minipool's performance - perf, ok := canonicalMinipoolPerformance.GetSmoothingPoolPerformance(address) + perf, ok := canonicalMinipoolPerformance.GetMinipoolPerformance(address) if !ok { t.Fatalf("Minipool %s not found in canonical minipool performance, despite being listed as present", address.Hex()) } diff --git a/shared/services/rewards/generator.go b/shared/services/rewards/generator.go index daed477aa..0f976dd93 100644 --- a/shared/services/rewards/generator.go +++ b/shared/services/rewards/generator.go @@ -216,7 +216,7 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecu type GenerateTreeResult struct { RewardsFile IRewardsFile - MinipoolPerformanceFile IMinipoolPerformanceFile + MinipoolPerformanceFile IPerformanceFile InvalidNetworkNodes map[common.Address]uint64 } diff --git a/shared/services/rewards/mock_v10_test.go b/shared/services/rewards/mock_v10_test.go index c79fd4bbe..3ab8961af 100644 --- a/shared/services/rewards/mock_v10_test.go +++ b/shared/services/rewards/mock_v10_test.go @@ -151,7 +151,7 @@ func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { if len(node.Minipools) != 1 { t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) } - minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + minipoolPerf, _ := minipoolPerformanceFile.GetMinipoolPerformance(node.Minipools[0].Address) // 8 eth minipools with 10% collateral earn 14% commission overall. // They earned 10% on 24/32 of the 1 eth of consensus rewards already, which is 0.075 eth. // Their bonus is therefore 4/10 of 0.075 eth, which is 0.03 eth. 
@@ -173,7 +173,7 @@ func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { if len(node.Minipools) != 1 { t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) } - minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + minipoolPerf, _ := minipoolPerformanceFile.GetMinipoolPerformance(node.Minipools[0].Address) // The 16 eth minipools earn 10% on 24/32. expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(1e17)) expectedAttestationScore.Mul(expectedAttestationScore, sixteenEth) @@ -209,7 +209,7 @@ func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) mp := node.Minipools[0] - perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + perf, _ := minipoolPerformanceFile.GetMinipoolPerformance(mp.Address) // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve expectedRewardsAmount := big.NewInt(0) @@ -263,7 +263,7 @@ func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) mp := node.Minipools[0] - perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + perf, _ := minipoolPerformanceFile.GetMinipoolPerformance(mp.Address) // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve expectedRewardsAmount := big.NewInt(0) @@ -312,7 +312,7 @@ func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { for _, node := range bondReductionNode { mp := node.Minipools[0] - perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + perf, _ := minipoolPerformanceFile.GetMinipoolPerformance(mp.Address) // Check the rewards amount in the rewards file rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) @@ -473,14 +473,14 @@ func TestInsufficientEthForBonuseses(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := 
v10Artifacts.MinipoolPerformanceFile - perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + perfOne, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if !ok { t.Fatalf("Node one minipool performance not found") } if perfOne.GetBonusEthEarned().Uint64() != 416 { t.Fatalf("Node one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 416) } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool performance not found") } @@ -585,11 +585,11 @@ func TestMockNoRPLRewards(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile - _, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + _, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if ok { t.Fatalf("Node one minipool performance should not be found") } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool one performance not found") } @@ -600,7 +600,7 @@ func TestMockNoRPLRewards(tt *testing.T) { if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) } - perfThree, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[1].Address) + perfThree, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[1].Address) if !ok { t.Fatalf("Node two minipool two performance not found") } @@ -714,14 +714,14 @@ func 
TestMockOptedOutAndThenBondReduced(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile - perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + perfOne, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if !ok { t.Fatalf("Node one minipool performance should be found") } if perfOne.GetBonusEthEarned().Uint64() != 0 { t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 0) } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool one performance not found") } @@ -835,7 +835,7 @@ func TestMockWithdrawableEpoch(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile - perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + perfOne, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if !ok { t.Fatalf("Node one minipool performance should be found") } @@ -848,7 +848,7 @@ func TestMockWithdrawableEpoch(tt *testing.T) { if perfOne.GetConsensusIncome().Uint64() != 1000000000000000000 { t.Fatalf("Node one minipool one consensus income does not match expected value: %s != %d", perfOne.GetConsensusIncome().String(), 1000000000000000000) } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool one performance not found") } diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index 0440bcfb0..812fde2d7 100644 --- 
a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -161,7 +161,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if len(node.Minipools) != 1 { t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) } - minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + minipoolPerf, _ := minipoolPerformanceFile.GetMinipoolPerformance(node.Minipools[0].Address) // 8 eth minipools with 10% collateral earn 14% commission overall. // They earned 10% on 24/32 of the 1 eth of consensus rewards already, which is 0.075 eth. // Their bonus is therefore 4/10 of 0.075 eth, which is 0.03 eth. @@ -183,7 +183,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { if len(node.Minipools) != 1 { t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) } - minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + minipoolPerf, _ := minipoolPerformanceFile.GetMinipoolPerformance(node.Minipools[0].Address) // The 16 eth minipools earn 10% on 24/32. 
expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(1e17)) expectedAttestationScore.Mul(expectedAttestationScore, sixteenEth) @@ -225,7 +225,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) mp := node.Minipools[0] - perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + perf, _ := minipoolPerformanceFile.GetMinipoolPerformance(mp.Address) // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve expectedRewardsAmount := big.NewInt(0) @@ -285,7 +285,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) mp := node.Minipools[0] - perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + perf, _ := minipoolPerformanceFile.GetMinipoolPerformance(mp.Address) // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve expectedRewardsAmount := big.NewInt(0) @@ -340,7 +340,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { for _, node := range bondReductionNode { mp := node.Minipools[0] - perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + perf, _ := minipoolPerformanceFile.GetMinipoolPerformance(mp.Address) // Check the rewards amount in the rewards file rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) @@ -771,6 +771,8 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { // Make sure it got ETH expectedEthAmount, _ := big.NewInt(0).SetString("3213270392749244710", 10) ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + // Multiply by i+1 since the number of validators scales with i+1 + expectedEthAmount.Mul(expectedEthAmount, big.NewInt(int64(validatorCount))) if ethAmount.Cmp(expectedEthAmount) != 0 { t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) } @@ -897,14 +899,14 @@ 
func TestInsufficientEthForBonusesesV11(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile - perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + perfOne, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if !ok { t.Fatalf("Node one minipool performance not found") } if perfOne.GetBonusEthEarned().Uint64() != 416 { t.Fatalf("Node one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 416) } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool performance not found") } @@ -1009,11 +1011,11 @@ func TestMockNoRPLRewardsV11(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile - _, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + _, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if ok { t.Fatalf("Node one minipool performance should not be found") } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool one performance not found") } @@ -1024,7 +1026,7 @@ func TestMockNoRPLRewardsV11(tt *testing.T) { if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) } - perfThree, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[1].Address) + perfThree, ok := 
minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[1].Address) if !ok { t.Fatalf("Node two minipool two performance not found") } @@ -1138,14 +1140,14 @@ func TestMockOptedOutAndThenBondReducedV11(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile - perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + perfOne, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if !ok { t.Fatalf("Node one minipool performance should be found") } if perfOne.GetBonusEthEarned().Uint64() != 0 { t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 0) } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool one performance not found") } @@ -1259,7 +1261,7 @@ func TestMockWithdrawableEpochV11(tt *testing.T) { // Check the minipool performance file minipoolPerformanceFile := v11Artifacts.MinipoolPerformanceFile - perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + perfOne, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeOne.Minipools[0].Address) if !ok { t.Fatalf("Node one minipool performance should be found") } @@ -1272,7 +1274,7 @@ func TestMockWithdrawableEpochV11(tt *testing.T) { if perfOne.GetConsensusIncome().Uint64() != 1000000000000000000 { t.Fatalf("Node one minipool one consensus income does not match expected value: %s != %d", perfOne.GetConsensusIncome().String(), 1000000000000000000) } - perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + perfTwo, ok := minipoolPerformanceFile.GetMinipoolPerformance(nodeTwo.Minipools[0].Address) if !ok { t.Fatalf("Node two minipool one performance 
not found") } diff --git a/shared/services/rewards/performance-file-v1.go b/shared/services/rewards/performance-file-v1.go new file mode 100644 index 000000000..b87db05e9 --- /dev/null +++ b/shared/services/rewards/performance-file-v1.go @@ -0,0 +1,110 @@ +package rewards + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/goccy/go-json" + "github.com/rocket-pool/smartnode/bindings/types" +) + +type PerformanceFile_v1 struct { + RewardsFileVersion uint64 `json:"rewardsFileVersion"` + RulesetVersion uint64 `json:"rulesetVersion"` + Index uint64 `json:"index"` + Network string `json:"network"` + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + ConsensusStartBlock uint64 `json:"consensusStartBlock"` + ConsensusEndBlock uint64 `json:"consensusEndBlock"` + ExecutionStartBlock uint64 `json:"executionStartBlock"` + ExecutionEndBlock uint64 `json:"executionEndBlock"` + MinipoolPerformance map[common.Address]*MinipoolPerformance_v2 `json:"minipoolPerformance"` + MegapoolPerformance map[common.Address]*MegapoolPerformance_v1 `json:"megapoolPerformance"` + BonusScalar *QuotedBigInt `json:"bonusScalar"` +} + +// Type assertion to implement IPerformanceFile +var _ IPerformanceFile = (*PerformanceFile_v1)(nil) + +type MegapoolPerformance_v1 struct { + VoterShare *QuotedBigInt `json:"voterShare"` + ValidatorPerformance map[types.ValidatorPubkey]*MegapoolValidatorPerformance_v1 `json:"validatorPerformance"` +} + +// Conveniently, v2 minipool performance tracks all the same fields +// as a single megapool validator, but has 3 extras. 
+// Those fields are omitempty anyway, so we will just leave them nil +type MegapoolValidatorPerformance_v1 = MinipoolPerformance_v2 + +// Type assertion to implement ISmoothingPoolPerformance +var _ ISmoothingPoolPerformance = (*MegapoolValidatorPerformance_v1)(nil) + +func (f *PerformanceFile_v1) Serialize() ([]byte, error) { + return json.Marshal(f) +} + +func (f *PerformanceFile_v1) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for performance files") +} + +func (f *PerformanceFile_v1) SerializeHuman() ([]byte, error) { + return json.MarshalIndent(f, "", "\t") +} + +func (f *PerformanceFile_v1) Deserialize(bytes []byte) error { + return json.Unmarshal(bytes, &f) +} + +func (f *PerformanceFile_v1) GetMinipoolAddresses() []common.Address { + addresses := make([]common.Address, len(f.MinipoolPerformance)) + i := 0 + for address := range f.MinipoolPerformance { + addresses[i] = address + i++ + } + return addresses +} + +func (f *PerformanceFile_v1) GetMegapoolAddresses() []common.Address { + addresses := make([]common.Address, len(f.MegapoolPerformance)) + i := 0 + for address := range f.MegapoolPerformance { + addresses[i] = address + i++ + } + return addresses +} + +func (f *PerformanceFile_v1) GetMegapoolValidatorPubkeys(megapoolAddress common.Address) ([]types.ValidatorPubkey, error) { + perf, exists := f.MegapoolPerformance[megapoolAddress] + if !exists { + return nil, fmt.Errorf("megapool %s not found", megapoolAddress) + } + numValidators := len(perf.ValidatorPerformance) + pubkeys := make([]types.ValidatorPubkey, numValidators) + i := 0 + for pubkey := range perf.ValidatorPerformance { + pubkeys[i] = pubkey + i++ + } + return pubkeys, nil +} + +func (f *PerformanceFile_v1) GetMegapoolPerformance(megapoolAddress common.Address, pubkey types.ValidatorPubkey) (ISmoothingPoolPerformance, bool) { + megapoolPerf, exists := f.MegapoolPerformance[megapoolAddress] + if !exists { + return nil, false + } + validatorPerf, exists 
:= megapoolPerf.ValidatorPerformance[pubkey] + if !exists { + return nil, false + } + return validatorPerf, true +} + +func (f *PerformanceFile_v1) GetMinipoolPerformance(minipoolAddress common.Address) (ISmoothingPoolPerformance, bool) { + perf, exists := f.MinipoolPerformance[minipoolAddress] + return perf, exists +} diff --git a/shared/services/rewards/rewards-file-v1.go b/shared/services/rewards/rewards-file-v1.go index 7016d85a4..bb095cdab 100644 --- a/shared/services/rewards/rewards-file-v1.go +++ b/shared/services/rewards/rewards-file-v1.go @@ -15,17 +15,23 @@ import ( ) type MinipoolPerformanceFile_v1 struct { - Index uint64 `json:"index"` - Network string `json:"network"` - StartTime time.Time `json:"startTime,omitempty"` - EndTime time.Time `json:"endTime,omitempty"` - ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` - ConsensusEndBlock uint64 `json:"consensusEndBlock,omitempty"` - ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` - ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` - MinipoolPerformance map[common.Address]*SmoothingPoolMinipoolPerformance_v1 `json:"minipoolPerformance"` + Index uint64 `json:"index"` + Network string `json:"network"` + StartTime time.Time `json:"startTime,omitempty"` + EndTime time.Time `json:"endTime,omitempty"` + ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` + ConsensusEndBlock uint64 `json:"consensusEndBlock,omitempty"` + ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` + ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` + MinipoolPerformance map[common.Address]*MinipoolPerformance_v1 `json:"minipoolPerformance"` } +// Type assertion to implement IPerformanceFile +var _ IPerformanceFile = (*MinipoolPerformanceFile_v1)(nil) + +// Type assertion to implement IRewardsFile +var _ IRewardsFile = (*RewardsFile_v1)(nil) + // Serialize a minipool performance file into bytes func (f *MinipoolPerformanceFile_v1) Serialize() ([]byte, 
error) { return json.Marshal(f) @@ -57,14 +63,26 @@ func (f *MinipoolPerformanceFile_v1) GetMinipoolAddresses() []common.Address { return addresses } +func (f *MinipoolPerformanceFile_v1) GetMegapoolAddresses() []common.Address { + return nil +} + +func (f *MinipoolPerformanceFile_v1) GetMegapoolPerformance(megapoolAddress common.Address, pubkey types.ValidatorPubkey) (ISmoothingPoolPerformance, bool) { + return nil, false +} + +func (f *MinipoolPerformanceFile_v1) GetMegapoolValidatorPubkeys(megapoolAddress common.Address) ([]types.ValidatorPubkey, error) { + return nil, nil +} + // Get a minipool's smoothing pool performance if it was present -func (f *MinipoolPerformanceFile_v1) GetSmoothingPoolPerformance(minipoolAddress common.Address) (ISmoothingPoolMinipoolPerformance, bool) { +func (f *MinipoolPerformanceFile_v1) GetMinipoolPerformance(minipoolAddress common.Address) (ISmoothingPoolPerformance, bool) { perf, exists := f.MinipoolPerformance[minipoolAddress] return perf, exists } // Minipool stats -type SmoothingPoolMinipoolPerformance_v1 struct { +type MinipoolPerformance_v1 struct { Pubkey string `json:"pubkey"` StartSlot uint64 `json:"startSlot,omitempty"` EndSlot uint64 `json:"endSlot,omitempty"` @@ -76,31 +94,34 @@ type SmoothingPoolMinipoolPerformance_v1 struct { EthEarned float64 `json:"ethEarned"` } -func (p *SmoothingPoolMinipoolPerformance_v1) GetPubkey() (types.ValidatorPubkey, error) { +// Type assertion to implement ISmoothingPoolPerformance +var _ ISmoothingPoolPerformance = (*MinipoolPerformance_v1)(nil) + +func (p *MinipoolPerformance_v1) GetPubkey() (types.ValidatorPubkey, error) { return types.HexToValidatorPubkey(p.Pubkey) } -func (p *SmoothingPoolMinipoolPerformance_v1) GetSuccessfulAttestationCount() uint64 { +func (p *MinipoolPerformance_v1) GetSuccessfulAttestationCount() uint64 { return p.SuccessfulAttestations } -func (p *SmoothingPoolMinipoolPerformance_v1) GetMissedAttestationCount() uint64 { +func (p *MinipoolPerformance_v1) 
GetMissedAttestationCount() uint64 { return p.MissedAttestations } -func (p *SmoothingPoolMinipoolPerformance_v1) GetMissingAttestationSlots() []uint64 { +func (p *MinipoolPerformance_v1) GetMissingAttestationSlots() []uint64 { return p.MissingAttestationSlots } -func (p *SmoothingPoolMinipoolPerformance_v1) GetEthEarned() *big.Int { +func (p *MinipoolPerformance_v1) GetEthEarned() *big.Int { return eth.EthToWei(p.EthEarned) } -func (p *SmoothingPoolMinipoolPerformance_v1) GetBonusEthEarned() *big.Int { +func (p *MinipoolPerformance_v1) GetBonusEthEarned() *big.Int { return big.NewInt(0) } -func (p *SmoothingPoolMinipoolPerformance_v1) GetEffectiveCommission() *big.Int { +func (p *MinipoolPerformance_v1) GetEffectiveCommission() *big.Int { return big.NewInt(0) } -func (p *SmoothingPoolMinipoolPerformance_v1) GetConsensusIncome() *big.Int { +func (p *MinipoolPerformance_v1) GetConsensusIncome() *big.Int { return big.NewInt(0) } -func (p *SmoothingPoolMinipoolPerformance_v1) GetAttestationScore() *big.Int { +func (p *MinipoolPerformance_v1) GetAttestationScore() *big.Int { return big.NewInt(0) } diff --git a/shared/services/rewards/rewards-file-v2.go b/shared/services/rewards/rewards-file-v2.go index d49da407c..3224ffef0 100644 --- a/shared/services/rewards/rewards-file-v2.go +++ b/shared/services/rewards/rewards-file-v2.go @@ -14,19 +14,22 @@ import ( ) type MinipoolPerformanceFile_v2 struct { - RewardsFileVersion uint64 `json:"rewardsFileVersion"` - RulesetVersion uint64 `json:"rulesetVersion"` - Index uint64 `json:"index"` - Network string `json:"network"` - StartTime time.Time `json:"startTime,omitempty"` - EndTime time.Time `json:"endTime,omitempty"` - ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` - ConsensusEndBlock uint64 `json:"consensusEndBlock,omitempty"` - ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` - ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` - MinipoolPerformance 
map[common.Address]*SmoothingPoolMinipoolPerformance_v2 `json:"minipoolPerformance"` - BonusScalar *QuotedBigInt `json:"bonusScalar,omitempty"` -} + RewardsFileVersion uint64 `json:"rewardsFileVersion"` + RulesetVersion uint64 `json:"rulesetVersion"` + Index uint64 `json:"index"` + Network string `json:"network"` + StartTime time.Time `json:"startTime,omitempty"` + EndTime time.Time `json:"endTime,omitempty"` + ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` + ConsensusEndBlock uint64 `json:"consensusEndBlock,omitempty"` + ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` + ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` + MinipoolPerformance map[common.Address]*MinipoolPerformance_v2 `json:"minipoolPerformance"` + BonusScalar *QuotedBigInt `json:"bonusScalar,omitempty"` +} + +// Type assertion to implement IPerformanceFile +var _ IPerformanceFile = (*MinipoolPerformanceFile_v2)(nil) // Serialize a minipool performance file into bytes func (f *MinipoolPerformanceFile_v2) Serialize() ([]byte, error) { @@ -59,14 +62,26 @@ func (f *MinipoolPerformanceFile_v2) GetMinipoolAddresses() []common.Address { return addresses } +func (f *MinipoolPerformanceFile_v2) GetMegapoolAddresses() []common.Address { + return nil +} + +func (f *MinipoolPerformanceFile_v2) GetMegapoolPerformance(megapoolAddress common.Address, pubkey types.ValidatorPubkey) (ISmoothingPoolPerformance, bool) { + return nil, false +} + +func (f *MinipoolPerformanceFile_v2) GetMegapoolValidatorPubkeys(megapoolAddress common.Address) ([]types.ValidatorPubkey, error) { + return nil, nil +} + // Get a minipool's smoothing pool performance if it was present -func (f *MinipoolPerformanceFile_v2) GetSmoothingPoolPerformance(minipoolAddress common.Address) (ISmoothingPoolMinipoolPerformance, bool) { +func (f *MinipoolPerformanceFile_v2) GetMinipoolPerformance(minipoolAddress common.Address) (ISmoothingPoolPerformance, bool) { perf, exists := 
f.MinipoolPerformance[minipoolAddress] return perf, exists } // Minipool stats -type SmoothingPoolMinipoolPerformance_v2 struct { +type MinipoolPerformance_v2 struct { Pubkey string `json:"pubkey"` SuccessfulAttestations uint64 `json:"successfulAttestations"` MissedAttestations uint64 `json:"missedAttestations"` @@ -78,40 +93,43 @@ type SmoothingPoolMinipoolPerformance_v2 struct { EffectiveCommission *QuotedBigInt `json:"effectiveCommission,omitempty"` } -func (p *SmoothingPoolMinipoolPerformance_v2) GetPubkey() (types.ValidatorPubkey, error) { +// Type assertion to implement ISmoothingPoolPerformance +var _ ISmoothingPoolPerformance = (*MinipoolPerformance_v2)(nil) + +func (p *MinipoolPerformance_v2) GetPubkey() (types.ValidatorPubkey, error) { return types.HexToValidatorPubkey(p.Pubkey) } -func (p *SmoothingPoolMinipoolPerformance_v2) GetSuccessfulAttestationCount() uint64 { +func (p *MinipoolPerformance_v2) GetSuccessfulAttestationCount() uint64 { return p.SuccessfulAttestations } -func (p *SmoothingPoolMinipoolPerformance_v2) GetMissedAttestationCount() uint64 { +func (p *MinipoolPerformance_v2) GetMissedAttestationCount() uint64 { return p.MissedAttestations } -func (p *SmoothingPoolMinipoolPerformance_v2) GetMissingAttestationSlots() []uint64 { +func (p *MinipoolPerformance_v2) GetMissingAttestationSlots() []uint64 { return p.MissingAttestationSlots } -func (p *SmoothingPoolMinipoolPerformance_v2) GetEthEarned() *big.Int { +func (p *MinipoolPerformance_v2) GetEthEarned() *big.Int { return &p.EthEarned.Int } -func (p *SmoothingPoolMinipoolPerformance_v2) GetBonusEthEarned() *big.Int { +func (p *MinipoolPerformance_v2) GetBonusEthEarned() *big.Int { if p.BonusEthEarned == nil { return big.NewInt(0) } return &p.BonusEthEarned.Int } -func (p *SmoothingPoolMinipoolPerformance_v2) GetEffectiveCommission() *big.Int { +func (p *MinipoolPerformance_v2) GetEffectiveCommission() *big.Int { if p.EffectiveCommission == nil { return big.NewInt(0) } return 
&p.EffectiveCommission.Int } -func (p *SmoothingPoolMinipoolPerformance_v2) GetConsensusIncome() *big.Int { +func (p *MinipoolPerformance_v2) GetConsensusIncome() *big.Int { if p.ConsensusIncome == nil { return big.NewInt(0) } return &p.ConsensusIncome.Int } -func (p *SmoothingPoolMinipoolPerformance_v2) GetAttestationScore() *big.Int { +func (p *MinipoolPerformance_v2) GetAttestationScore() *big.Int { return &p.AttestationScore.Int } diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 3030d9d8f..70ddb0ec8 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -57,7 +57,7 @@ type RewardsBeaconClient interface { } // Interface for version-agnostic minipool performance -type IMinipoolPerformanceFile interface { +type IPerformanceFile interface { // Serialize a minipool performance file into bytes Serialize() ([]byte, error) SerializeSSZ() ([]byte, error) @@ -72,8 +72,18 @@ type IMinipoolPerformanceFile interface { // NOTE: the order of minipool addresses is not guaranteed to be stable, so don't rely on it GetMinipoolAddresses() []common.Address + // Get all of the megapools + // NOTE: the order of megapool addresses is not guaranteed to be stable, so don't rely on it + GetMegapoolAddresses() []common.Address + // Get a minipool's smoothing pool performance if it was present - GetSmoothingPoolPerformance(minipoolAddress common.Address) (ISmoothingPoolMinipoolPerformance, bool) + GetMinipoolPerformance(minipoolAddress common.Address) (ISmoothingPoolPerformance, bool) + + // Get a megapool's validator pubkeys + GetMegapoolValidatorPubkeys(megapoolAddress common.Address) ([]types.ValidatorPubkey, error) + + // Get a megapool's performance if it was present + GetMegapoolPerformance(megapoolAddress common.Address, pubkey types.ValidatorPubkey) (ISmoothingPoolPerformance, bool) } // Interface for version-agnostic rewards files @@ -152,7 +162,7 @@ type TotalRewards struct { } // Minipool stats -type 
ISmoothingPoolMinipoolPerformance interface { +type ISmoothingPoolPerformance interface { GetPubkey() (types.ValidatorPubkey, error) GetSuccessfulAttestationCount() uint64 GetMissedAttestationCount() uint64 @@ -420,7 +430,7 @@ func (versionHeader *VersionHeader) deserializeRewardsFile(bytes []byte) (IRewar panic("unreachable section of code reached, please report this error to the maintainers") } -func (versionHeader *VersionHeader) deserializeMinipoolPerformanceFile(bytes []byte) (IMinipoolPerformanceFile, error) { +func (versionHeader *VersionHeader) deserializeMinipoolPerformanceFile(bytes []byte) (IPerformanceFile, error) { if err := versionHeader.checkVersion(); err != nil { return nil, err } diff --git a/shared/services/rewards/utils.go b/shared/services/rewards/utils.go index 112281a20..c8d11506f 100644 --- a/shared/services/rewards/utils.go +++ b/shared/services/rewards/utils.go @@ -327,7 +327,7 @@ func DeserializeRewardsFile(bytes []byte) (IRewardsFile, error) { } // Deserializes a byte array into a rewards file interface -func DeserializeMinipoolPerformanceFile(bytes []byte) (IMinipoolPerformanceFile, error) { +func DeserializeMinipoolPerformanceFile(bytes []byte) (IPerformanceFile, error) { header, err := deserializeVersionHeader(bytes) if err != nil { return nil, fmt.Errorf("error deserializing rewards file header: %w", err) From 255fea667f58ebaaa07123513f3da74e63b2cd2c Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Fri, 5 Sep 2025 18:48:27 -0400 Subject: [PATCH 25/33] Use go 1.25.1 --- .github/workflows/build.yml | 2 +- .github/workflows/lint.yml | 4 ++-- .github/workflows/unit-tests.yml | 2 +- .golangci.yml | 2 +- Makefile | 2 +- docker/rocketpool-dockerfile | 2 +- go.mod | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1007054f5..04fe9a623 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v4 
- uses: actions/setup-go@v5 with: - go-version: 1.24.5 + go-version: 1.25.1 - run: make NO_DOCKER=true release docker-build: runs-on: ubuntu-latest diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4811a8814..703907433 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,12 +19,12 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: 1.24.5 + go-version: 1.25.1 - name: golangci-lint uses: golangci/golangci-lint-action@v8 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v2.1 + version: v2.4 # Optional: working directory, useful for monorepos # working-directory: ${{ matrix.modules }} diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 53fdbf017..4575e517d 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -16,6 +16,6 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: 1.24.5 + go-version: 1.25.1 cache-dependency-path: go.work.sum - run: make test diff --git a/.golangci.yml b/.golangci.yml index 2e7466d65..da34eeace 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,7 +6,7 @@ formatters: - goimports run: relative-path-mode: cfg - go: '1.24.5' + go: '1.25.1' output: formats: text: diff --git a/Makefile b/Makefile index 1d2587095..b8918349d 100644 --- a/Makefile +++ b/Makefile @@ -169,7 +169,7 @@ docker-prune: .PHONY: lint lint: ifndef NO_DOCKER - docker run -e GOMODCACHE=/go/.cache/pkg/mod -e GOCACHE=/go/.cache/go-build -e GOLANGCI_LINT_CACHE=/go/.cache/golangci-lint --user $(shell id -u):$(shell id -g) --rm -v ~/.cache:/go/.cache -v .:/smartnode --workdir /smartnode/ golangci/golangci-lint:v2.1-alpine golangci-lint fmt --diff + docker run -e GOMODCACHE=/go/.cache/pkg/mod -e GOCACHE=/go/.cache/go-build -e GOLANGCI_LINT_CACHE=/go/.cache/golangci-lint --user $(shell id -u):$(shell id -g) --rm -v 
~/.cache:/go/.cache -v .:/smartnode --workdir /smartnode/ golangci/golangci-lint:v2.4-alpine golangci-lint fmt --diff endif .PHONY: test diff --git a/docker/rocketpool-dockerfile b/docker/rocketpool-dockerfile index 69995a48b..4d07ed271 100644 --- a/docker/rocketpool-dockerfile +++ b/docker/rocketpool-dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM golang:1.24.5-bookworm AS smartnode_dependencies +FROM --platform=$BUILDPLATFORM golang:1.25.1-bookworm AS smartnode_dependencies ARG BUILDPLATFORM # Install build tools diff --git a/go.mod b/go.mod index 862af62c7..7bd428267 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/rocket-pool/smartnode -go 1.24.5 +go 1.25.1 // Do not update until you can test that its regression on ARM is resolved require github.com/herumi/bls-eth-go-binary v1.28.1 From 1c9da9bf12b33cfcc9abf2a6087a158523134b64 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Sun, 7 Sep 2025 20:01:14 -0400 Subject: [PATCH 26/33] Fix json marshal and test --- shared/services/rewards/mock_v11_test.go | 5 ++- .../services/rewards/performance-file-v1.go | 36 +++++++++++++++++-- 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index 812fde2d7..c239d6cb6 100644 --- a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -769,11 +769,14 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { } // Make sure it got ETH - expectedEthAmount, _ := big.NewInt(0).SetString("3213270392749244710", 10) + minipoolEthAmount, _ := big.NewInt(0).SetString("2698353474320241690", 10) + expectedEthAmount, _ := big.NewInt(0).SetString("257458459214501510", 10) ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) // Multiply by i+1 since the number of validators scales with i+1 expectedEthAmount.Mul(expectedEthAmount, big.NewInt(int64(validatorCount))) + expectedEthAmount.Add(expectedEthAmount, 
minipoolEthAmount) if ethAmount.Cmp(expectedEthAmount) != 0 { + fmt.Printf("Node: %+v\n", node) t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) } diff --git a/shared/services/rewards/performance-file-v1.go b/shared/services/rewards/performance-file-v1.go index b87db05e9..1bd39c558 100644 --- a/shared/services/rewards/performance-file-v1.go +++ b/shared/services/rewards/performance-file-v1.go @@ -1,6 +1,7 @@ package rewards import ( + "encoding/hex" "fmt" "time" @@ -28,9 +29,40 @@ type PerformanceFile_v1 struct { // Type assertion to implement IPerformanceFile var _ IPerformanceFile = (*PerformanceFile_v1)(nil) +type MegapoolPerformanceMap map[types.ValidatorPubkey]*MegapoolValidatorPerformance_v1 + type MegapoolPerformance_v1 struct { - VoterShare *QuotedBigInt `json:"voterShare"` - ValidatorPerformance map[types.ValidatorPubkey]*MegapoolValidatorPerformance_v1 `json:"validatorPerformance"` + VoterShare *QuotedBigInt `json:"voterShare"` + ValidatorPerformance MegapoolPerformanceMap `json:"validatorPerformance"` +} + +// MegapoolPerformanceMap has a custom JSON marshaler to avoid the issue with ValidatorPubkey not being a valid dict key. +// encoding/json/v2 will fix this once it's stable, and the custom marshaler can be removed. +func (m MegapoolPerformanceMap) MarshalJSON() ([]byte, error) { + out := make(map[string]*MegapoolValidatorPerformance_v1) + for pubkey, perf := range m { + out[pubkey.Hex()] = perf + } + return json.Marshal(out) +} + +// And a custom unmarshaler to avoid the issue with ValidatorPubkey not being a valid dict key. +// encoding/json/v2 will fix this once it's stable, and the custom unmarshaler can be removed. 
+func (m *MegapoolPerformanceMap) UnmarshalJSON(data []byte) error { + var out map[string]*MegapoolValidatorPerformance_v1 + err := json.Unmarshal(data, &out) + if err != nil { + return err + } + *m = make(MegapoolPerformanceMap, len(out)) + for pubkey, perf := range out { + pubkeyBytes, err := hex.DecodeString(pubkey) + if err != nil { + return fmt.Errorf("error decoding pubkey %s: %w", pubkey, err) + } + (*m)[types.ValidatorPubkey(pubkeyBytes)] = perf + } + return nil } // Conveniently, v2 minipool performance tracks all the same fields From df39ac0c96a31c0e50832f751929f93e64312542 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Sun, 7 Sep 2025 20:01:40 -0400 Subject: [PATCH 27/33] Remove unused parameter --- shared/services/rewards/generator-impl-v11.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 135cf3032..c7315fc0a 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -1149,10 +1149,9 @@ func (r *treeGeneratorImpl_v11) processEpoch(duringInterval bool, epoch uint64) // Process all of the slots in the epoch for i := uint64(0); i < r.slotsPerEpoch; i++ { - inclusionSlot := epoch*r.slotsPerEpoch + i attestations := attestationsPerSlot[i] if len(attestations) > 0 { - r.checkAttestations(attestations, inclusionSlot) + r.checkAttestations(attestations) } } @@ -1160,7 +1159,7 @@ func (r *treeGeneratorImpl_v11) processEpoch(duringInterval bool, epoch uint64) } -func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.AttestationInfo, inclusionSlot uint64) error { +func (r *treeGeneratorImpl_v11) checkAttestations(attestations []beacon.AttestationInfo) error { // Go through the attestations for the block for _, attestation := range attestations { From e33b5adcd55136f659cd26f3207591f738e208ca Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Sun, 7 Sep 2025 20:13:17 
-0400 Subject: [PATCH 28/33] Populate megapool performance --- shared/services/rewards/generator-impl-v11.go | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index c7315fc0a..6800a31f1 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -701,6 +701,40 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) r.performanceFile.MinipoolPerformance[minipoolInfo.Address] = performance } + // Add megapool rewards to the JSON + if nodeInfo.Megapool != nil { + for _, validator := range nodeInfo.Megapool.Validators { + successfulAttestations := uint64(len(validator.CompletedAttestations)) + missingAttestations := uint64(len(validator.MissingAttestationSlots)) + performance := &MegapoolValidatorPerformance_v1{ + Pubkey: validator.Pubkey.Hex(), + SuccessfulAttestations: successfulAttestations, + MissedAttestations: missingAttestations, + AttestationScore: validator.AttestationScore, + EthEarned: QuotedBigIntFromBigInt(validator.MegapoolValidatorShare), + BonusEthEarned: nil, + ConsensusIncome: nil, + EffectiveCommission: nil, + MissingAttestationSlots: []uint64{}, + } + if successfulAttestations+missingAttestations == 0 { + // Don't include megapools that have zero attestations + continue + } + for slot := range validator.MissingAttestationSlots { + performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) + } + mpPerformance, exists := r.performanceFile.MegapoolPerformance[nodeInfo.Megapool.Address] + if !exists { + mpPerformance = &MegapoolPerformance_v1{ + ValidatorPerformance: make(MegapoolPerformanceMap), + } + r.performanceFile.MegapoolPerformance[nodeInfo.Megapool.Address] = mpPerformance + } + mpPerformance.ValidatorPerformance[validator.Pubkey] = performance + } + } + // Add the rewards to the running total for the specified network 
rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] if !exists { From 65ba8ebf19af3bcca45df9d30b82c9e548dbe16e Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Sun, 7 Sep 2025 20:37:32 -0400 Subject: [PATCH 29/33] Populate missing parts of performance file --- shared/services/rewards/generator-impl-v11.go | 36 +++++++++---- .../services/rewards/performance-file-v1.go | 54 ++++++++++++++++--- .../rewards/ssz_types/rewards-file-v5.go | 2 + 3 files changed, 77 insertions(+), 15 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 6800a31f1..3d763513e 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "math/big" - "sort" + "slices" "sync" "time" @@ -70,7 +70,7 @@ type treeGeneratorImpl_v11 struct { func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) *treeGeneratorImpl_v11 { return &treeGeneratorImpl_v11{ rewardsFile: &ssz_types.SSZFile_v2{ - RewardsFileVersion: 3, + RewardsFileVersion: 4, RulesetVersion: 11, Index: index, IntervalsPassed: intervalsPassed, @@ -84,6 +84,7 @@ func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint TotalNodeWeight: sszbig.NewUint256(0), TotalVoterShareEth: sszbig.NewUint256(0), SmoothingPoolVoterShareEth: sszbig.NewUint256(0), + TotalPdaoShareEth: sszbig.NewUint256(0), }, NetworkRewards: ssz_types.NetworkRewards{}, NodeRewards: ssz_types.NodeRewards_v2{}, @@ -194,9 +195,13 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN // Sort all of the missed attestations so the files are always generated in the same state for _, minipoolInfo := range r.performanceFile.MinipoolPerformance { - sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool { - 
return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j] - }) + slices.Sort(minipoolInfo.MissingAttestationSlots) + } + + for _, megapoolInfo := range r.performanceFile.MegapoolPerformance { + for _, validatorInfo := range megapoolInfo.ValidatorPerformance { + slices.Sort(validatorInfo.MissingAttestationSlots) + } } return &GenerateTreeResult{ @@ -707,14 +712,11 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) successfulAttestations := uint64(len(validator.CompletedAttestations)) missingAttestations := uint64(len(validator.MissingAttestationSlots)) performance := &MegapoolValidatorPerformance_v1{ - Pubkey: validator.Pubkey.Hex(), + pubkey: validator.Pubkey.Hex(), SuccessfulAttestations: successfulAttestations, MissedAttestations: missingAttestations, AttestationScore: validator.AttestationScore, EthEarned: QuotedBigIntFromBigInt(validator.MegapoolValidatorShare), - BonusEthEarned: nil, - ConsensusIncome: nil, - EffectiveCommission: nil, MissingAttestationSlots: []uint64{}, } if successfulAttestations+missingAttestations == 0 { @@ -743,12 +745,25 @@ func (r *treeGeneratorImpl_v11) calculateEthRewards(checkBeaconPerformance bool) } rewardsForNetwork.SmoothingPoolEth.Add(rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) } + + // Finally, take care of adding voter share to the performance file + if nodeInfo.VoterShareEth.Cmp(common.Big0) > 0 { + performance, exists := r.performanceFile.MegapoolPerformance[nodeInfo.Megapool.Address] + if !exists { + performance = &MegapoolPerformance_v1{ + VoterShare: QuotedBigIntFromBigInt(nodeInfo.VoterShareEth), + } + r.performanceFile.MegapoolPerformance[nodeInfo.Megapool.Address] = performance + } + } } // Set the totals r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Set(nodeRewards.poolStakerEth) r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Set(nodeRewards.nodeOpEth) 
r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Set(r.smoothingPoolBalance) + r.rewardsFile.TotalRewards.TotalVoterShareEth.Set(nodeRewards.voterEth) + r.rewardsFile.TotalRewards.TotalPdaoShareEth.Set(nodeRewards.pdaoEth) return nil } @@ -852,6 +867,9 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { voterEth.Div(voterEth, big.NewInt(int64(r.successfulAttestations))) voterEth.Div(voterEth, oneEth) + // Set the voter share eth in the rewards file + r.rewardsFile.TotalRewards.SmoothingPoolVoterShareEth.Set(voterEth) + // Add in the earmarked voter share voterEth.Add(voterEth, r.networkState.NetworkDetails.SmoothingPoolPendingVoterShare) } diff --git a/shared/services/rewards/performance-file-v1.go b/shared/services/rewards/performance-file-v1.go index 1bd39c558..5abfdbcdc 100644 --- a/shared/services/rewards/performance-file-v1.go +++ b/shared/services/rewards/performance-file-v1.go @@ -3,6 +3,7 @@ package rewards import ( "encoding/hex" "fmt" + "math/big" "time" "github.com/ethereum/go-ethereum/common" @@ -32,8 +33,8 @@ var _ IPerformanceFile = (*PerformanceFile_v1)(nil) type MegapoolPerformanceMap map[types.ValidatorPubkey]*MegapoolValidatorPerformance_v1 type MegapoolPerformance_v1 struct { - VoterShare *QuotedBigInt `json:"voterShare"` - ValidatorPerformance MegapoolPerformanceMap `json:"validatorPerformance"` + VoterShare *QuotedBigInt `json:"voterShare,omitempty"` + ValidatorPerformance MegapoolPerformanceMap `json:"validatorPerformance,omitempty"` } // MegapoolPerformanceMap has a custom JSON marshaler to avoid the issue with ValidatorPubkey not being a valid dict key. 
@@ -60,15 +61,56 @@ func (m *MegapoolPerformanceMap) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("error decoding pubkey %s: %w", pubkey, err) } + perf.pubkey = pubkey (*m)[types.ValidatorPubkey(pubkeyBytes)] = perf } return nil } -// Conveniently, v2 minipool performance tracks all the same fields -// as a single megapool validator, but has 3 extras. -// Those fields are omitempty anyway, so we will just leave them nil -type MegapoolValidatorPerformance_v1 = MinipoolPerformance_v2 +type MegapoolValidatorPerformance_v1 struct { + pubkey string `json:"-"` + SuccessfulAttestations uint64 `json:"successfulAttestations"` + MissedAttestations uint64 `json:"missedAttestations"` + AttestationScore *QuotedBigInt `json:"attestationScore"` + MissingAttestationSlots []uint64 `json:"missingAttestationSlots"` + EthEarned *QuotedBigInt `json:"ethEarned"` +} + +func (p *MegapoolValidatorPerformance_v1) GetAttestationScore() *big.Int { + return &p.AttestationScore.Int +} + +func (p *MegapoolValidatorPerformance_v1) GetBonusEthEarned() *big.Int { + return nil +} + +func (p *MegapoolValidatorPerformance_v1) GetConsensusIncome() *big.Int { + return nil +} + +func (p *MegapoolValidatorPerformance_v1) GetEffectiveCommission() *big.Int { + return nil +} + +func (p *MegapoolValidatorPerformance_v1) GetEthEarned() *big.Int { + return &p.EthEarned.Int +} + +func (p *MegapoolValidatorPerformance_v1) GetPubkey() (types.ValidatorPubkey, error) { + return types.HexToValidatorPubkey(p.pubkey) +} + +func (p *MegapoolValidatorPerformance_v1) GetMissedAttestationCount() uint64 { + return p.MissedAttestations +} + +func (p *MegapoolValidatorPerformance_v1) GetMissingAttestationSlots() []uint64 { + return p.MissingAttestationSlots +} + +func (p *MegapoolValidatorPerformance_v1) GetSuccessfulAttestationCount() uint64 { + return p.SuccessfulAttestations +} // Type assertion to implement ISmoothingPoolPerformance var _ ISmoothingPoolPerformance = 
(*MegapoolValidatorPerformance_v1)(nil) diff --git a/shared/services/rewards/ssz_types/rewards-file-v5.go b/shared/services/rewards/ssz_types/rewards-file-v5.go index e5049ecda..356125032 100644 --- a/shared/services/rewards/ssz_types/rewards-file-v5.go +++ b/shared/services/rewards/ssz_types/rewards-file-v5.go @@ -299,6 +299,8 @@ type TotalRewards_v2 struct { // Smoothing Pool Voter Share is the portion of TotalVoterShareEth that comes from // the Smoothing Pool contract (as opposed to megapool distribution) SmoothingPoolVoterShareEth big.Uint256 `ssz-size:"32" json:"smoothingPoolVoterShareEth"` + // Total amount of Eth sent to the pDAO + TotalPdaoShareEth big.Uint256 `ssz-size:"32" json:"totalPdaoShareEth"` } type NodeReward_v2 struct { From 5d8901b985ddd01ebdc302472660311925dcae03 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Sun, 7 Sep 2025 20:52:50 -0400 Subject: [PATCH 30/33] Conditionally rename performance file --- shared/services/config/smartnode-config.go | 12 ++++++++++ shared/services/rewards/files.go | 17 +++++++++---- shared/services/rewards/generator-impl-v11.go | 1 + shared/services/rewards/generator-impl-v8.go | 1 + .../services/rewards/generator-impl-v9-v10.go | 1 + shared/services/rewards/generator-v8_test.go | 2 +- shared/services/rewards/generator.go | 1 + treegen/tree-gen.go | 24 ++++++++++++------- 8 files changed, 45 insertions(+), 14 deletions(-) diff --git a/shared/services/config/smartnode-config.go b/shared/services/config/smartnode-config.go index 903f5d7f7..db9f6c4f5 100644 --- a/shared/services/config/smartnode-config.go +++ b/shared/services/config/smartnode-config.go @@ -18,6 +18,7 @@ const ( SnapshotID string = "rocketpool-dao.eth" rewardsTreeFilenameFormat string = "rp-rewards-%s-%d%s" minipoolPerformanceFilenameFormat string = "rp-minipool-performance-%s-%d%s" + performanceFilenameFormat string = "rp-performance-%s-%d%s" RewardsTreeIpfsExtension string = ".zst" RewardsTreesFolder string = "rewards-trees" ChecksumTableFilename 
string = "checksums.sha384" @@ -803,6 +804,10 @@ func (cfg *SmartnodeConfig) GetMinipoolPerformanceFilename(interval uint64) stri return cfg.formatRewardsFilename(minipoolPerformanceFilenameFormat, interval, RewardsExtensionJSON) } +func (cfg *SmartnodeConfig) GetPerformanceFilename(interval uint64) string { + return cfg.formatRewardsFilename(performanceFilenameFormat, interval, RewardsExtensionJSON) +} + func (cfg *SmartnodeConfig) GetRewardsTreePath(interval uint64, daemon bool, extension RewardsExtension) string { return filepath.Join( cfg.GetRewardsTreeDirectory(daemon), @@ -817,6 +822,13 @@ func (cfg *SmartnodeConfig) GetMinipoolPerformancePath(interval uint64, daemon b ) } +func (cfg *SmartnodeConfig) GetPerformancePath(interval uint64) string { + return filepath.Join( + cfg.GetRewardsTreeDirectory(false), + cfg.GetPerformanceFilename(interval), + ) +} + func (cfg *SmartnodeConfig) GetRegenerateRewardsTreeRequestPath(interval uint64, daemon bool) string { if daemon && !cfg.parent.IsNativeMode { return filepath.Join(DaemonDataPath, WatchtowerFolder, fmt.Sprintf(RegenerateRewardsTreeRequestFormat, interval)) diff --git a/shared/services/rewards/files.go b/shared/services/rewards/files.go index 6b412136b..2413a234c 100644 --- a/shared/services/rewards/files.go +++ b/shared/services/rewards/files.go @@ -195,15 +195,22 @@ func saveArtifactsImpl(smartnode *config.SmartnodeConfig, treeResult *GenerateTr var primaryCid *cid.Cid out := make(map[string]cid.Cid, 4) + var performancePath string + if treeResult.RulesetVersion < 11 { + performancePath = smartnode.GetMinipoolPerformancePath(currentIndex, true) + } else { + performancePath = smartnode.GetPerformancePath(currentIndex) + } + files := []ILocalFile{ // Do not reorder! 
- // i == 0 - minipool performance file - NewLocalFile[IPerformanceFile]( + // i == 0 - performance file + NewLocalFile( treeResult.MinipoolPerformanceFile, - smartnode.GetMinipoolPerformancePath(currentIndex, true), + performancePath, ), // i == 1 - rewards file - NewLocalFile[IRewardsFile]( + NewLocalFile( rewardsFile, smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionJSON), ), @@ -216,7 +223,7 @@ func saveArtifactsImpl(smartnode *config.SmartnodeConfig, treeResult *GenerateTr files = append( files, // i == 2 - ssz rewards file - NewLocalFile[IRewardsFile]( + NewLocalFile( rewardsFile, smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionSSZ), ), diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 3d763513e..a80d66f52 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -205,6 +205,7 @@ func (r *treeGeneratorImpl_v11) generateTree(rp RewardsExecutionClient, networkN } return &GenerateTreeResult{ + RulesetVersion: r.rewardsFile.RulesetVersion, RewardsFile: r.rewardsFile, InvalidNetworkNodes: r.invalidNetworkNodes, MinipoolPerformanceFile: r.performanceFile, diff --git a/shared/services/rewards/generator-impl-v8.go b/shared/services/rewards/generator-impl-v8.go index 6298202b7..2b117ba58 100644 --- a/shared/services/rewards/generator-impl-v8.go +++ b/shared/services/rewards/generator-impl-v8.go @@ -170,6 +170,7 @@ func (r *treeGeneratorImpl_v8) generateTree(rp RewardsExecutionClient, networkNa } return &GenerateTreeResult{ + RulesetVersion: r.rewardsFile.RulesetVersion, RewardsFile: r.rewardsFile, InvalidNetworkNodes: r.invalidNetworkNodes, MinipoolPerformanceFile: &r.rewardsFile.MinipoolPerformanceFile, diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go index d583639e7..a4dd7528b 100644 --- a/shared/services/rewards/generator-impl-v9-v10.go 
+++ b/shared/services/rewards/generator-impl-v9-v10.go @@ -182,6 +182,7 @@ func (r *treeGeneratorImpl_v9_v10) generateTree(rp RewardsExecutionClient, netwo } return &GenerateTreeResult{ + RulesetVersion: r.rewardsFile.RulesetVersion, RewardsFile: r.rewardsFile, InvalidNetworkNodes: r.invalidNetworkNodes, MinipoolPerformanceFile: r.minipoolPerformanceFile, diff --git a/shared/services/rewards/generator-v8_test.go b/shared/services/rewards/generator-v8_test.go index 83189a55d..d68bb4548 100644 --- a/shared/services/rewards/generator-v8_test.go +++ b/shared/services/rewards/generator-v8_test.go @@ -31,7 +31,7 @@ func (t *v8Test) saveArtifacts(prefix string, result *GenerateTreeResult) { f: result.RewardsFile, } performanceLocalFile := LocalFile[IPerformanceFile]{ - fullPath: filepath.Join(tmpDir, fmt.Sprintf("%s-minipool-performance.json", prefix)), + fullPath: filepath.Join(tmpDir, fmt.Sprintf("%s-performance.json", prefix)), f: result.MinipoolPerformanceFile, } _, err = rewardsLocalFile.Write() diff --git a/shared/services/rewards/generator.go b/shared/services/rewards/generator.go index 0f976dd93..2724147dd 100644 --- a/shared/services/rewards/generator.go +++ b/shared/services/rewards/generator.go @@ -215,6 +215,7 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecu } type GenerateTreeResult struct { + RulesetVersion uint64 RewardsFile IRewardsFile MinipoolPerformanceFile IPerformanceFile InvalidNetworkNodes map[common.Address]uint64 diff --git a/treegen/tree-gen.go b/treegen/tree-gen.go index 4b7e34abc..8fadaf0a9 100644 --- a/treegen/tree-gen.go +++ b/treegen/tree-gen.go @@ -463,24 +463,32 @@ func (g *treeGenerator) writeFiles(result *rprewards.GenerateTreeResult, votingP g.outputDir, g.cfg.Smartnode.GetRewardsTreeFilename(index, config.RewardsExtensionJSON), ) - minipoolPerformancePath := filepath.Join( - g.outputDir, - g.cfg.Smartnode.GetMinipoolPerformanceFilename(index), - ) + var performancePath string + if g.ruleset < 11 { + 
performancePath = filepath.Join( + g.outputDir, + g.cfg.Smartnode.GetMinipoolPerformanceFilename(index), + ) + } else { + performancePath = filepath.Join( + g.outputDir, + g.cfg.Smartnode.GetPerformanceFilename(index), + ) + } // Serialize the minipool performance file - minipoolPerformanceBytes, err := g.serializeMinipoolPerformance(result) + performanceBytes, err := g.serializeMinipoolPerformance(result) if err != nil { return fmt.Errorf("error serializing minipool performance file into JSON: %w", err) } // Write it to disk - err = os.WriteFile(minipoolPerformancePath, minipoolPerformanceBytes, 0644) + err = os.WriteFile(performancePath, performanceBytes, 0644) if err != nil { - return fmt.Errorf("error saving minipool performance file to %s: %w", minipoolPerformancePath, err) + return fmt.Errorf("error saving minipool performance file to %s: %w", performancePath, err) } - g.log.Printlnf("Saved minipool performance file to %s", minipoolPerformancePath) + g.log.Printlnf("Saved minipool performance file to %s", performancePath) rewardsFile.SetMinipoolPerformanceFileCID("---") // Serialize the rewards tree to JSON From 1b7066042e70eeed68d274162e78b5855094fd5f Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Sun, 7 Sep 2025 21:35:37 -0400 Subject: [PATCH 31/33] Fix tests, don't use voter share to pay bonuses --- shared/services/rewards/generator-impl-v11.go | 8 +++++++- shared/services/rewards/mock_v11_test.go | 15 +++++++++------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index a80d66f52..465aa2fa9 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -974,7 +974,13 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { remainingBalance := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) remainingBalance.Sub(remainingBalance, totalEthForMegapools) 
remainingBalance.Sub(remainingBalance, pdaoEth) - remainingBalance.Sub(remainingBalance, trueVoterEth) + if trueVoterEth.Sign() > 0 { + remainingBalance.Sub(remainingBalance, trueVoterEth) + } else { + // Nobody earned voter share. + // Subtract voter share- it shouldn't be used to pay bonuses, or we could have a deficit later. + remainingBalance.Sub(remainingBalance, r.networkState.NetworkDetails.SmoothingPoolPendingVoterShare) + } if remainingBalance.Cmp(totalConsensusBonus) < 0 { r.log.Printlnf("WARNING: Remaining balance is less than total consensus bonus... Balance = %s, total consensus bonus = %s", remainingBalance.String(), totalConsensusBonus.String()) // Scale bonuses down to fit the remaining balance diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index c239d6cb6..701d3e473 100644 --- a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -835,7 +835,10 @@ func TestInsufficientEthForBonusesesV11(tt *testing.T) { history.Nodes = append(history.Nodes, odaoNodes...) 
// Ovewrite the SP balance to a value under the bonus commission - history.NetworkDetails.SmoothingPoolBalance = big.NewInt(1000) + history.NetworkDetails.SmoothingPoolBalance = big.NewInt(1100) + // Set the SP voter share to 0 + history.NetworkDetails.SmoothingPoolPendingVoterShare = big.NewInt(100) + // Set the pdao share to 0 state := history.GetEndNetworkState() state.IsSaturnDeployed = true @@ -1007,7 +1010,7 @@ func TestMockNoRPLRewardsV11(tt *testing.T) { t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 0) } ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) - expectedEthTwo, _ := big.NewInt(0).SetString("32575000000000000000", 10) + expectedEthTwo, _ := big.NewInt(0).SetString("29325000000000000000", 10) if ethTwo.Cmp(expectedEthTwo) != 0 { t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) } @@ -1131,12 +1134,12 @@ func TestMockOptedOutAndThenBondReducedV11(tt *testing.T) { rewardsFile := v11Artifacts.RewardsFile ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) // Node one was in the SP so it should have some ETH, but no bonuses - expectedEthOne, _ := big.NewInt(0).SetString("11309523809523809523", 10) + expectedEthOne, _ := big.NewInt(0).SetString("10178571428571428571", 10) if ethOne.Cmp(expectedEthOne) != 0 { t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) } ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) - expectedEthTwo, _ := big.NewInt(0).SetString("26089087301587301587", 10) + expectedEthTwo, _ := big.NewInt(0).SetString("23483928571428571428", 10) if ethTwo.Cmp(expectedEthTwo) != 0 { t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) } @@ -1252,12 +1255,12 @@ func TestMockWithdrawableEpochV11(tt *testing.T) { // Check the rewards file rewardsFile := v11Artifacts.RewardsFile ethOne 
:= rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) - expectedEthOne, _ := big.NewInt(0).SetString("21920833333333333333", 10) + expectedEthOne, _ := big.NewInt(0).SetString("19732500000000000000", 10) if ethOne.Cmp(expectedEthOne) != 0 { t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) } ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) - expectedEthTwo, _ := big.NewInt(0).SetString("10654166666666666666", 10) + expectedEthTwo, _ := big.NewInt(0).SetString("9592500000000000000", 10) if ethTwo.Cmp(expectedEthTwo) != 0 { t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) } From 7a88d9078dbaa9ab8c9b84e0af543bc426f1ef75 Mon Sep 17 00:00:00 2001 From: Jacob Shufro Date: Mon, 8 Sep 2025 09:54:45 -0400 Subject: [PATCH 32/33] Implement consensus bonus cut-off interval --- shared/services/rewards/generator-impl-v11.go | 8 ++++-- shared/services/rewards/generator.go | 28 +++++++++++++++++-- shared/services/rewards/mock_v11_test.go | 5 ++++ 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/shared/services/rewards/generator-impl-v11.go b/shared/services/rewards/generator-impl-v11.go index 465aa2fa9..0831811a9 100644 --- a/shared/services/rewards/generator-impl-v11.go +++ b/shared/services/rewards/generator-impl-v11.go @@ -60,6 +60,8 @@ type treeGeneratorImpl_v11 struct { performanceFile *PerformanceFile_v1 nodeRewards map[common.Address]*ssz_types.NodeReward_v2 networkRewards map[ssz_types.Layer]*ssz_types.NetworkReward + // Whether the interval is eligible for consensus bonuses + isEligibleInterval bool // fields for RPIP-62 bonus calculations // Withdrawals made by a minipool's validator. 
@@ -67,7 +69,7 @@ type treeGeneratorImpl_v11 struct { } // Create a new tree generator -func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) *treeGeneratorImpl_v11 { +func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState, isEligibleInterval bool) *treeGeneratorImpl_v11 { return &treeGeneratorImpl_v11{ rewardsFile: &ssz_types.SSZFile_v2{ RewardsFileVersion: 4, @@ -108,6 +110,7 @@ func newTreeGeneratorImpl_v11(log *log.ColorLogger, logPrefix string, index uint nodeRewards: map[common.Address]*ssz_types.NodeReward_v2{}, networkRewards: map[ssz_types.Layer]*ssz_types.NetworkReward{}, minipoolWithdrawals: map[common.Address]*big.Int{}, + isEligibleInterval: isEligibleInterval, } } @@ -916,9 +919,8 @@ func (r *treeGeneratorImpl_v11) calculateNodeRewards() (*nodeRewards, error) { } // Calculate the minipool bonuses - isEligibleInterval := true // TODO - check on-chain for saturn 1 var totalConsensusBonus *big.Int - if r.rewardsFile.RulesetVersion >= 10 && isEligibleInterval { + if r.rewardsFile.RulesetVersion >= 10 && r.isEligibleInterval { totalConsensusBonus, err = r.calculateNodeBonuses() if err != nil { return nil, err diff --git a/shared/services/rewards/generator.go b/shared/services/rewards/generator.go index 2724147dd..47e5dbd5b 100644 --- a/shared/services/rewards/generator.go +++ b/shared/services/rewards/generator.go @@ -50,10 +50,12 @@ const ( MainnetV8Interval uint64 = 18 MainnetV9Interval uint64 = 29 MainnetV10Interval uint64 = 30 + MainnetV11Interval uint64 = 9000 // TODO: schedule v11 // Devnet intervals // Testnet intervals TestnetV10Interval uint64 = 0 + TestnetV11Interval uint64 = 9000 // TODO: schedule v11 ) func GetMainnetRulesetVersion(interval uint64) uint64 { @@ -128,6 
+130,23 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecu intervalsPassed: intervalsPassed, } + // Get the current network + network := t.cfg.Smartnode.Network.Value.(cfgtypes.Network) + + // Determine if the interval is eligible for consensus bonuses + var isEligibleInterval bool + switch network { + case cfgtypes.Network_Mainnet: + isEligibleInterval = t.index-4 < MainnetV11Interval + case cfgtypes.Network_Testnet: + isEligibleInterval = t.index-4 < TestnetV11Interval + default: + isEligibleInterval = true + } + + // v11 + v11_generator := newTreeGeneratorImpl_v11(t.logger, t.logPrefix, t.index, t.snapshotEnd, t.elSnapshotHeader, t.intervalsPassed, state, isEligibleInterval) + // v10 v10_generator := newTreeGeneratorImpl_v9_v10(10, t.logger, t.logPrefix, t.index, t.snapshotEnd, t.elSnapshotHeader, t.intervalsPassed, state) @@ -139,6 +158,12 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecu // Create the interval wrappers rewardsIntervalInfos := []rewardsIntervalInfo{ + { + rewardsRulesetVersion: 11, + mainnetStartInterval: MainnetV11Interval, + testnetStartInterval: TestnetV11Interval, + generator: v11_generator, + }, { rewardsRulesetVersion: 10, mainnetStartInterval: MainnetV10Interval, @@ -171,9 +196,6 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecu t.rewardsIntervalInfos[info.rewardsRulesetVersion] = info } - // Get the current network - network := t.cfg.Smartnode.Network.Value.(cfgtypes.Network) - // Determine which actual rulesets to use based on the current interval number, checking in descending order. 
foundGenerator := false foundApproximator := false diff --git a/shared/services/rewards/mock_v11_test.go b/shared/services/rewards/mock_v11_test.go index 701d3e473..57ac5c3e6 100644 --- a/shared/services/rewards/mock_v11_test.go +++ b/shared/services/rewards/mock_v11_test.go @@ -90,6 +90,7 @@ func TestMockIntervalDefaultsTreegenv11(tt *testing.T) { }, /* intervalsPassed= */ 1, state, + true, ) v11Artifacts, err := generatorv11.generateTree( @@ -878,6 +879,7 @@ func TestInsufficientEthForBonusesesV11(tt *testing.T) { }, /* intervalsPassed= */ 1, state, + true, ) v11Artifacts, err := generatorv11.generateTree( @@ -988,6 +990,7 @@ func TestMockNoRPLRewardsV11(tt *testing.T) { }, /* intervalsPassed= */ 1, state, + true, ) v11Artifacts, err := generatorv11.generateTree( @@ -1116,6 +1119,7 @@ func TestMockOptedOutAndThenBondReducedV11(tt *testing.T) { }, /* intervalsPassed= */ 1, state, + true, ) v11Artifacts, err := generatorv11.generateTree( @@ -1238,6 +1242,7 @@ func TestMockWithdrawableEpochV11(tt *testing.T) { }, /* intervalsPassed= */ 1, state, + true, ) v11Artifacts, err := generatorv11.generateTree( From 7614e2cc61353f2f536e03d4c216de076422752d Mon Sep 17 00:00:00 2001 From: Fornax <23104993+fornax2@users.noreply.github.com> Date: Mon, 15 Sep 2025 18:30:46 -0300 Subject: [PATCH 33/33] Add devnet v11 interval --- shared/services/rewards/generator.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/shared/services/rewards/generator.go b/shared/services/rewards/generator.go index 47e5dbd5b..a5179cb97 100644 --- a/shared/services/rewards/generator.go +++ b/shared/services/rewards/generator.go @@ -52,6 +52,7 @@ const ( MainnetV10Interval uint64 = 30 MainnetV11Interval uint64 = 9000 // TODO: schedule v11 // Devnet intervals + DevnetV11Interval uint64 = 21 // Testnet intervals TestnetV10Interval uint64 = 0 @@ -162,12 +163,14 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecu rewardsRulesetVersion: 11, mainnetStartInterval: 
MainnetV11Interval, testnetStartInterval: TestnetV11Interval, + devnetStartInterval: DevnetV11Interval, generator: v11_generator, }, { rewardsRulesetVersion: 10, mainnetStartInterval: MainnetV10Interval, testnetStartInterval: TestnetV10Interval, + devnetStartInterval: 0, generator: v10_generator, }, {