diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index aa511482a..ade236aef 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -17,4 +17,4 @@ jobs:
       - uses: actions/setup-go@v4
         with:
           go-version: 1.21.8
-      - run: go test ./...
+      - run: go test ./... -timeout 30m
diff --git a/go.mod b/go.mod
index d0e599224..e5c78591c 100644
--- a/go.mod
+++ b/go.mod
@@ -22,6 +22,7 @@ require (
 	github.com/goccy/go-json v0.10.2
 	github.com/google/uuid v1.5.0
 	github.com/hashicorp/go-version v1.6.0
+	github.com/holiman/uint256 v1.2.4
 	github.com/ipfs/boxo v0.8.0
 	github.com/ipfs/go-cid v0.4.1
 	github.com/ipfs/go-datastore v0.6.0
@@ -32,7 +33,7 @@ require (
 	github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7
 	github.com/prysmaticlabs/prysm/v5 v5.0.3
 	github.com/rivo/tview v0.0.0-20230208211350-7dfff1ce7854
-	github.com/rocket-pool/rocketpool-go v1.8.4-0.20241009143357-7b6894d57365
+	github.com/rocket-pool/rocketpool-go v1.8.4-0.20241122223132-c5f2be18f72b
 	github.com/sethvargo/go-password v0.2.0
 	github.com/shirou/gopsutil/v3 v3.23.1
 	github.com/tyler-smith/go-bip39 v1.1.0
@@ -87,7 +88,6 @@ require (
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.0.1 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/herumi/bls-eth-go-binary v1.28.1 // indirect
-	github.com/holiman/uint256 v1.2.4 // indirect
 	github.com/ipfs/bbloom v0.0.4 // indirect
 	github.com/ipfs/go-bitfield v1.1.0 // indirect
 	github.com/ipfs/go-block-format v0.1.2 // indirect
diff --git a/go.sum b/go.sum
index 7a109e9ef..a77ba55d9 100644
--- a/go.sum
+++ b/go.sum
@@ -633,10 +633,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd h1:p9KuetSKB9nte9I/MkkiM3pwKFVQgqxxPTQ0y56Ff6s=
 github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd/go.mod h1:UE9fof8P7iESVtLn1K9CTSkNRYVFHZHlf96RKbU33kA=
-github.com/rocket-pool/rocketpool-go v1.8.4-0.20240903025128-025f78ebda85 h1:sCeOQE95E9KATbaz9rnHezLvZnJa0FdNp+kE7cwogSI=
-github.com/rocket-pool/rocketpool-go v1.8.4-0.20240903025128-025f78ebda85/go.mod h1:f2TVsMOYmCwaJOhshG2zRoX89PZmvCkCD7UYJ9waRkI=
-github.com/rocket-pool/rocketpool-go v1.8.4-0.20241009143357-7b6894d57365 h1:e8Y0PxBCpIV0NhCM2VvuceNbGSMfLagbMhcfwBzCNNc=
-github.com/rocket-pool/rocketpool-go v1.8.4-0.20241009143357-7b6894d57365/go.mod h1:f2TVsMOYmCwaJOhshG2zRoX89PZmvCkCD7UYJ9waRkI=
+github.com/rocket-pool/rocketpool-go v1.8.4-0.20241122223132-c5f2be18f72b h1:PnL2c1StqHDOjyOUYn4C/tuwhLtIZ2N/3qfNYyQlVWc=
+github.com/rocket-pool/rocketpool-go v1.8.4-0.20241122223132-c5f2be18f72b/go.mod h1:f2TVsMOYmCwaJOhshG2zRoX89PZmvCkCD7UYJ9waRkI=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
diff --git a/rocketpool/api/network/generate-tree.go b/rocketpool/api/network/generate-tree.go
index b2d9929c9..af402c00c 100644
--- a/rocketpool/api/network/generate-tree.go
+++ b/rocketpool/api/network/generate-tree.go
@@ -5,8 +5,8 @@ import (
 	"os"
 
 	"github.com/fatih/color"
-	"github.com/rocket-pool/rocketpool-go/rewards"
 	"github.com/rocket-pool/smartnode/shared/services"
+	"github.com/rocket-pool/smartnode/shared/services/config"
 	"github.com/rocket-pool/smartnode/shared/types/api"
 	"github.com/urfave/cli"
 )
@@ -32,14 +32,14 @@ func canGenerateRewardsTree(c *cli.Context, index uint64) (*api.CanNetworkGenera
 	response := api.CanNetworkGenerateRewardsTreeResponse{}
 
 	// Get the current interval
-	currentIndexBig, err := rewards.GetRewardIndex(rp, nil)
+	currentIndexBig, err := rp.GetRewardIndex(nil)
 	if err != nil {
 		return nil, err
 	}
 	response.CurrentIndex = currentIndexBig.Uint64()
 
 	// Get the path of the file to save
-	filePath := cfg.Smartnode.GetRewardsTreePath(index, true)
+	filePath := cfg.Smartnode.GetRewardsTreePath(index, true, config.RewardsExtensionJSON)
 	_, err = os.Stat(filePath)
 	if os.IsNotExist(err) {
 		response.TreeFileExists = false
diff --git a/rocketpool/node/collectors/node-collector.go b/rocketpool/node/collectors/node-collector.go
index 32ca59482..4b69ff0f1 100644
--- a/rocketpool/node/collectors/node-collector.go
+++ b/rocketpool/node/collectors/node-collector.go
@@ -321,8 +321,10 @@ func (collector *NodeCollector) Collect(channel chan<- prometheus.Metric) {
 			if !previousInterval.TreeFileExists {
 				return fmt.Errorf("Error retrieving previous interval's total node weight: rewards file %s doesn't exist for interval %d", previousInterval.TreeFilePath, previousRewardIndex)
 			}
-			// Convert to a float, accuracy loss is meaningless compared to the heuristic's natural inaccuracy.
-			previousIntervalTotalNodeWeight = &previousInterval.TotalNodeWeight.Int
+
+			if previousInterval.TotalNodeWeight != nil {
+				previousIntervalTotalNodeWeight.Set(previousInterval.TotalNodeWeight)
+			}
 
 			// Get the info for each claimed interval
 			for _, claimedInterval := range claimed {
@@ -533,6 +535,7 @@ func (collector *NodeCollector) Collect(channel chan<- prometheus.Metric) {
 
 	nodeWeightSum := big.NewInt(0).Add(nodeWeight, previousIntervalTotalNodeWeight)
 
+	// Convert to a float, accuracy loss is meaningless compared to the heuristic's natural inaccuracy.
 	// nodeWeightRatio = current_node_weight / (current_node_weight + previous_interval_total_node_weight)
 	nodeWeightRatio, _ := big.NewFloat(0).Quo(
 		big.NewFloat(0).SetInt(nodeWeight),
diff --git a/rocketpool/node/download-reward-trees.go b/rocketpool/node/download-reward-trees.go
index f93c2f39c..79ae6d6a3 100644
--- a/rocketpool/node/download-reward-trees.go
+++ b/rocketpool/node/download-reward-trees.go
@@ -96,7 +96,7 @@ func (d *downloadRewardsTrees) run(state *state.NetworkState) error {
 	missingIntervals := []uint64{}
 	for i := uint64(0); i < currentIndex; i++ {
 		// Check if the tree file exists
-		treeFilePath := d.cfg.Smartnode.GetRewardsTreePath(i, true)
+		treeFilePath := d.cfg.Smartnode.GetRewardsTreePath(i, true, config.RewardsExtensionJSON)
 		_, err = os.Stat(treeFilePath)
 		if os.IsNotExist(err) {
 			d.log.Printlnf("You are missing the rewards tree file for interval %d.", i)
diff --git a/rocketpool/node/node.go b/rocketpool/node/node.go
index 73dc544f8..8b8b69e31 100644
--- a/rocketpool/node/node.go
+++ b/rocketpool/node/node.go
@@ -117,10 +117,7 @@ func run(c *cli.Context) error {
 	updateLog := log.NewColorLogger(UpdateColor)
 
 	// Create the state manager
-	m, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bc, &updateLog)
-	if err != nil {
-		return err
-	}
+	m := state.NewNetworkStateManager(rp, cfg.Smartnode.GetStateManagerContracts(), bc, &updateLog)
 	stateLocker := collectors.NewStateLocker()
 
 	// Initialize tasks
diff --git a/rocketpool/watchtower/generate-rewards-tree.go b/rocketpool/watchtower/generate-rewards-tree.go
index fd6e2b62c..a93b6d782 100644
--- a/rocketpool/watchtower/generate-rewards-tree.go
+++ b/rocketpool/watchtower/generate-rewards-tree.go
@@ -139,8 +139,13 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) {
 	generationPrefix := fmt.Sprintf("[Interval %d Tree]", index)
 	t.log.Printlnf("%s Starting generation of Merkle rewards tree for interval %d.", generationPrefix, index)
 
+	// Get previous rewards pool addresses
+	previousRewardsPoolAddresses := t.cfg.Smartnode.GetPreviousRewardsPoolAddresses()
+
+	rewardsClient := rprewards.NewRewardsExecutionClient(t.rp)
+
 	// Find the event for this interval
-	rewardsEvent, err := rprewards.GetRewardSnapshotEvent(t.rp, t.cfg, index, nil)
+	rewardsEvent, err := rewardsClient.GetRewardSnapshotEvent(previousRewardsPoolAddresses, index, nil)
 	if err != nil {
 		t.handleError(fmt.Errorf("%s Error getting event for interval %d: %w", generationPrefix, index, err))
 		return
@@ -164,11 +169,7 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) {
 		address, err := client.RocketStorage.GetAddress(opts, crypto.Keccak256Hash([]byte("contract.addressrocketTokenRETH")))
 		if err == nil {
 			// Create the state manager with using the primary or fallback (not necessarily archive) EC
-			stateManager, err = state.NewNetworkStateManager(client, t.cfg, t.rp.Client, t.bc, &t.log)
-			if err != nil {
-				t.handleError(fmt.Errorf("error creating new NetworkStateManager with Archive EC: %w", err))
-				return
-			}
+			stateManager = state.NewNetworkStateManager(client, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, &t.log)
 		} else {
 			// Check if an Archive EC is provided, and if using it would potentially resolve the error
 			errMessage := err.Error()
@@ -199,12 +200,16 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) {
 					t.handleError(fmt.Errorf("Error verifying rETH address with Archive EC: %w", err))
 					return
 				}
-				// Create the state manager with the archive EC
-				stateManager, err = state.NewNetworkStateManager(client, t.cfg, ec, t.bc, &t.log)
+
+				// Create a new rocketpool-go instance
+				archiveRP, err := rocketpool.NewRocketPool(ec, *t.rp.RocketStorageContract.Address)
 				if err != nil {
-					t.handleError(fmt.Errorf("Error creating new NetworkStateManager with ARchive EC: %w", err))
+					t.handleError(fmt.Errorf("Error instantiating client with Archive EC: %w", err))
 					return
 				}
+
+				// Create the state manager with the archive EC
+				stateManager = state.NewNetworkStateManager(archiveRP, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, &t.log)
 			} else {
 				// No archive node specified
 				t.handleError(fmt.Errorf("***ERROR*** Primary EC cannot retrieve state for historical block %d and the Archive EC is not specified.", elBlockHeader.Number.Uint64()))
@@ -234,53 +239,46 @@ func (t *generateRewardsTree) generateRewardsTree(index uint64) {
 
 // Implementation for rewards tree generation using a viable EC
 func (t *generateRewardsTree) generateRewardsTreeImpl(rp *rocketpool.RocketPool, index uint64, generationPrefix string, rewardsEvent rewards.RewardsEvent, elBlockHeader *types.Header, state *state.NetworkState) {
+	// Determine the end of the interval
+	snapshotEnd := &rprewards.SnapshotEnd{
+		ConsensusBlock: rewardsEvent.ConsensusBlock.Uint64(),
+		ExecutionBlock: rewardsEvent.ExecutionBlock.Uint64(),
+		Slot:           state.BeaconConfig.FirstSlotAtLeast(rewardsEvent.IntervalEndTime.Unix()),
+	}
+
 	// Generate the rewards file
 	start := time.Now()
-	treegen, err := rprewards.NewTreeGenerator(&t.log, generationPrefix, rp, t.cfg, t.bc, index, rewardsEvent.IntervalStartTime, rewardsEvent.IntervalEndTime, rewardsEvent.ConsensusBlock.Uint64(), elBlockHeader, rewardsEvent.IntervalsPassed.Uint64(), state, nil)
+	treegen, err := rprewards.NewTreeGenerator(&t.log, generationPrefix, rprewards.NewRewardsExecutionClient(rp), t.cfg, t.bc, index, rewardsEvent.IntervalStartTime, rewardsEvent.IntervalEndTime, snapshotEnd, elBlockHeader, rewardsEvent.IntervalsPassed.Uint64(), state)
 	if err != nil {
 		t.handleError(fmt.Errorf("%s Error creating Merkle tree generator: %w", generationPrefix, err))
 		return
 	}
-	rewardsFile, err := treegen.GenerateTree()
+	treeResult, err := treegen.GenerateTree()
 	if err != nil {
 		t.handleError(fmt.Errorf("%s Error generating Merkle tree: %w", generationPrefix, err))
 		return
 	}
-	header := rewardsFile.GetHeader()
-	for address, network := range header.InvalidNetworkNodes {
+	rewardsFile := treeResult.RewardsFile
+	for address, network := range treeResult.InvalidNetworkNodes {
 		t.log.Printlnf("%s WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", generationPrefix, address.Hex(), network)
 	}
 	t.log.Printlnf("%s Finished in %s", generationPrefix, time.Since(start).String())
 
 	// Validate the Merkle root
-	root := common.BytesToHash(header.MerkleTree.Root())
-	if root != rewardsEvent.MerkleRoot {
-		t.log.Printlnf("%s WARNING: your Merkle tree had a root of %s, but the canonical Merkle tree's root was %s. This file will not be usable for claiming rewards.", generationPrefix, root.Hex(), rewardsEvent.MerkleRoot.Hex())
+	root := rewardsFile.GetMerkleRoot()
+	if root != rewardsEvent.MerkleRoot.Hex() {
+		t.log.Printlnf("%s WARNING: your Merkle tree had a root of %s, but the canonical Merkle tree's root was %s. This file will not be usable for claiming rewards.", generationPrefix, root, rewardsEvent.MerkleRoot.Hex())
 	} else {
-		t.log.Printlnf("%s Your Merkle tree's root of %s matches the canonical root! You will be able to use this file for claiming rewards.", generationPrefix, header.MerkleRoot)
+		t.log.Printlnf("%s Your Merkle tree's root of %s matches the canonical root! You will be able to use this file for claiming rewards.", generationPrefix, root)
 	}
 
-	// Create the JSON files
 	rewardsFile.SetMinipoolPerformanceFileCID("---")
+
+	// Save the files
 	t.log.Printlnf("%s Saving JSON files...", generationPrefix)
-	localMinipoolPerformanceFile := rprewards.NewLocalFile[rprewards.IMinipoolPerformanceFile](
-		rewardsFile.GetMinipoolPerformanceFile(),
-		t.cfg.Smartnode.GetMinipoolPerformancePath(index, true),
-	)
-	localRewardsFile := rprewards.NewLocalFile[rprewards.IRewardsFile](
-		rewardsFile,
-		t.cfg.Smartnode.GetRewardsTreePath(index, true),
-	)
-
-	// Write the files
-	err = localMinipoolPerformanceFile.Write()
-	if err != nil {
-		t.handleError(fmt.Errorf("%s error saving minipool performance file: %w", generationPrefix, err))
-		return
-	}
-	err = localRewardsFile.Write()
+	_, _, err = treegen.SaveFiles(treeResult, false)
 	if err != nil {
-		t.handleError(fmt.Errorf("%s error saving rewards file: %w", generationPrefix, err))
+		t.handleError(fmt.Errorf("%s failed to save rewards artifacts: %w", generationPrefix, err))
 		return
 	}
 
diff --git a/rocketpool/watchtower/submit-network-balances.go b/rocketpool/watchtower/submit-network-balances.go
index acd8176de..9f281ccc2 100644
--- a/rocketpool/watchtower/submit-network-balances.go
+++ b/rocketpool/watchtower/submit-network-balances.go
@@ -297,10 +297,7 @@ func (t *submitNetworkBalances) getNetworkBalances(elBlockHeader *types.Header,
 	}
 
 	// Create a new state gen manager
-	mgr, err := state.NewNetworkStateManager(client, t.cfg, client.Client, t.bc, t.log)
-	if err != nil {
-		return networkBalances{}, fmt.Errorf("error creating network state manager for EL block %s, Beacon slot %d: %w", elBlock, beaconBlock, err)
-	}
+	mgr := state.NewNetworkStateManager(client, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, t.log)
 
 	// Create a new state for the target block
 	state, err := mgr.GetStateForSlot(beaconBlock)
@@ -352,10 +349,16 @@ func (t *submitNetworkBalances) getNetworkBalances(elBlockHeader *types.Header,
 	timeSinceStart := slotTime.Sub(startTime)
 	intervalsPassed := timeSinceStart / intervalTime
 	endTime := slotTime
+	// Since we aren't generating an actual tree, just use beaconBlock as the snapshotEnd
+	snapshotEnd := &rprewards.SnapshotEnd{
+		Slot:           beaconBlock,
+		ConsensusBlock: beaconBlock,
+		ExecutionBlock: state.ElBlockNumber,
+	}
 
 	// Approximate the staker's share of the smoothing pool balance
 	// NOTE: this will use the "vanilla" variant of treegen, without rolling records, to retain parity with other Oracle DAO nodes that aren't using rolling records
-	treegen, err := rprewards.NewTreeGenerator(t.log, "[Balances]", client, t.cfg, t.bc, currentIndex, startTime, endTime, beaconBlock, elBlockHeader, uint64(intervalsPassed), state, nil)
+	treegen, err := rprewards.NewTreeGenerator(t.log, "[Balances]", rprewards.NewRewardsExecutionClient(client), t.cfg, t.bc, currentIndex, startTime, endTime, snapshotEnd, elBlockHeader, uint64(intervalsPassed), state)
 	if err != nil {
 		return fmt.Errorf("error creating merkle tree generator to approximate share of smoothing pool: %w", err)
 	}
diff --git a/rocketpool/watchtower/submit-rewards-tree-rolling.go b/rocketpool/watchtower/submit-rewards-tree-rolling.go
deleted file mode 100644
index 2d13443a3..000000000
--- a/rocketpool/watchtower/submit-rewards-tree-rolling.go
+++ /dev/null
@@ -1,694 +0,0 @@
-package watchtower
-
-import (
-	"context"
-	"encoding/hex"
-	"fmt"
-	"math"
-	"math/big"
-	"os"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/ethereum/go-ethereum/accounts/abi/bind"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/rocket-pool/rocketpool-go/rewards"
-	"github.com/rocket-pool/rocketpool-go/rocketpool"
-	"github.com/rocket-pool/rocketpool-go/tokens"
-	"github.com/rocket-pool/rocketpool-go/utils/eth"
-	"github.com/rocket-pool/smartnode/rocketpool/watchtower/utils"
-	"github.com/rocket-pool/smartnode/shared/services"
-	"github.com/rocket-pool/smartnode/shared/services/beacon"
-	"github.com/rocket-pool/smartnode/shared/services/config"
-	rprewards "github.com/rocket-pool/smartnode/shared/services/rewards"
-	"github.com/rocket-pool/smartnode/shared/services/state"
-	"github.com/rocket-pool/smartnode/shared/services/wallet"
-	"github.com/rocket-pool/smartnode/shared/utils/api"
-	"github.com/rocket-pool/smartnode/shared/utils/eth1"
-	hexutil "github.com/rocket-pool/smartnode/shared/utils/hex"
-	"github.com/rocket-pool/smartnode/shared/utils/log"
-	"github.com/urfave/cli"
-)
-
-// Process balances and rewards task
-type submitRewardsTree_Rolling struct {
-	c           *cli.Context
-	log         log.ColorLogger
-	errLog      log.ColorLogger
-	cfg         *config.RocketPoolConfig
-	w           *wallet.Wallet
-	ec          rocketpool.ExecutionClient
-	rp          *rocketpool.RocketPool
-	bc          beacon.Client
-	genesisTime time.Time
-	recordMgr   *rprewards.RollingRecordManager
-	stateMgr    *state.NetworkStateManager
-	logPrefix   string
-
-	lock      *sync.Mutex
-	isRunning bool
-}
-
-// Create submit rewards tree with rolling record support
-func newSubmitRewardsTree_Rolling(c *cli.Context, logger log.ColorLogger, errorLogger log.ColorLogger, stateMgr *state.NetworkStateManager) (*submitRewardsTree_Rolling, error) {
-
-	// Get services
-	cfg, err := services.GetConfig(c)
-	if err != nil {
-		return nil, err
-	}
-	w, err := services.GetWallet(c)
-	if err != nil {
-		return nil, err
-	}
-	ec, err := services.GetEthClient(c)
-	if err != nil {
-		return nil, err
-	}
-	rp, err := services.GetRocketPool(c)
-	if err != nil {
-		return nil, err
-	}
-	bc, err := services.GetBeaconClient(c)
-	if err != nil {
-		return nil, err
-	}
-
-	// Get the beacon config
-	beaconCfg, err := bc.GetEth2Config()
-	if err != nil {
-		return nil, fmt.Errorf("error getting beacon config: %w", err)
-	}
-
-	// Get the Beacon genesis time
-	genesisTime := time.Unix(int64(beaconCfg.GenesisTime), 0)
-
-	// Get the current interval index
-	currentIndexBig, err := rewards.GetRewardIndex(rp, nil)
-	if err != nil {
-		return nil, fmt.Errorf("error getting rewards index: %w", err)
-	}
-	currentIndex := currentIndexBig.Uint64()
-	if currentIndex == 0 {
-		return nil, fmt.Errorf("rolling records cannot be used for the first rewards interval")
-	}
-
-	// Get the previous RocketRewardsPool addresses
-	prevAddresses := cfg.Smartnode.GetPreviousRewardsPoolAddresses()
-
-	// Get the last rewards event and starting epoch
-	found, event, err := rewards.GetRewardsEvent(rp, currentIndex-1, prevAddresses, nil)
-	if err != nil {
-		return nil, fmt.Errorf("error getting event for rewards interval %d: %w", currentIndex-1, err)
-	}
-	if !found {
-		return nil, fmt.Errorf("event for rewards interval %d not found", currentIndex-1)
-	}
-
-	// Get the start slot of the current interval
-	startSlot, err := rprewards.GetStartSlotForInterval(event, bc, beaconCfg)
-	if err != nil {
-		return nil, fmt.Errorf("error getting start slot for interval %d: %w", currentIndex, err)
-	}
-
-	// Create the task
-	lock := &sync.Mutex{}
-	logPrefix := "[Rolling Record]"
-	task := &submitRewardsTree_Rolling{
-		c:           c,
-		log:         logger,
-		errLog:      errorLogger,
-		cfg:         cfg,
-		ec:          ec,
-		w:           w,
-		rp:          rp,
-		bc:          bc,
-		stateMgr:    stateMgr,
-		genesisTime: genesisTime,
-		logPrefix:   logPrefix,
-		lock:        lock,
-		isRunning:   false,
-	}
-
-	// Make a new rolling manager
-	recordMgr, err := rprewards.NewRollingRecordManager(&task.log, &task.errLog, cfg, rp, bc, stateMgr, startSlot, beaconCfg, currentIndex)
-	if err != nil {
-		return nil, fmt.Errorf("error creating rolling record manager: %w", err)
-	}
-
-	// Load the latest checkpoint
-	beaconHead, err := bc.GetBeaconHead()
-	if err != nil {
-		return nil, fmt.Errorf("error getting beacon head: %w", err)
-	}
-	latestFinalizedSlot := (beaconHead.FinalizedEpoch+1)*beaconCfg.SlotsPerEpoch - 1
-	_, err = recordMgr.LoadBestRecordFromDisk(startSlot, latestFinalizedSlot, currentIndex)
-	if err != nil {
-		return nil, fmt.Errorf("error loading rolling record checkpoint from disk: %w", err)
-	}
-
-	// Return
-	task.recordMgr = recordMgr
-	return task, nil
-
-}
-
-// Update the rolling record and run the submission process if applicable
-func (t *submitRewardsTree_Rolling) run(headState *state.NetworkState) error {
-	// Wait for clients to sync
-	if err := services.WaitEthClientSynced(t.c, true); err != nil {
-		return err
-	}
-	if err := services.WaitBeaconClientSynced(t.c, true); err != nil {
-		return err
-	}
-
-	t.lock.Lock()
-	if t.isRunning {
-		t.log.Println("Record update is already running in the background.")
-		t.lock.Unlock()
-		return nil
-	}
-	t.lock.Unlock()
-
-	nodeAccount, err := t.w.GetNodeAccount()
-	if err != nil {
-		return fmt.Errorf("error loading node account: %w", err)
-	}
-	nodeAddress := nodeAccount.Address
-
-	go func() {
-		t.lock.Lock()
-		t.isRunning = true
-		t.lock.Unlock()
-		t.log.Printlnf("%s Running record update in a separate thread.", t.logPrefix)
-
-		// Capture the latest head state if one isn't passed in
-		if headState == nil {
-			// Get the latest Beacon block
-			latestBlock, err := t.stateMgr.GetLatestBeaconBlock()
-			if err != nil {
-				t.handleError(fmt.Errorf("error getting latest Beacon block: %w", err))
-				return
-			}
-
-			// Get the state of the network
-			headState, err = t.stateMgr.GetStateForSlot(latestBlock.Slot)
-			if err != nil {
-				t.handleError(fmt.Errorf("error getting network state: %w", err))
-				return
-			}
-		}
-
-		// Check whether or not the node is in the Oracle DAO
-		isInOdao := false
-		for _, details := range headState.OracleDaoMemberDetails {
-			if details.Address == nodeAddress {
-				isInOdao = true
-				break
-			}
-		}
-
-		// Get the latest finalized slot and epoch
-		latestFinalizedBlock, err := t.stateMgr.GetLatestFinalizedBeaconBlock()
-		if err != nil {
-			t.handleError(fmt.Errorf("error getting latest finalized block: %w", err))
-			return
-		}
-		latestFinalizedEpoch := latestFinalizedBlock.Slot / headState.BeaconConfig.SlotsPerEpoch
-
-		// Check if a rewards interval is due
-		isRewardsSubmissionDue, rewardsSlot, intervalsPassed, startTime, endTime, err := t.isRewardsIntervalSubmissionRequired(headState)
-		if err != nil {
-			t.handleError(fmt.Errorf("error checking if rewards submission is required: %w", err))
-			return
-		}
-
-		// If no special upcoming state is required, update normally
-		if !isRewardsSubmissionDue {
-			err = t.recordMgr.UpdateRecordToState(headState, latestFinalizedBlock.Slot)
-			if err != nil {
-				t.handleError(fmt.Errorf("error updating record: %w", err))
-				return
-			}
-
-			t.lock.Lock()
-			t.isRunning = false
-			t.lock.Unlock()
-			return
-		}
-
-		// Check if rewards reporting is ready
-		rewardsEpoch := rewardsSlot / headState.BeaconConfig.SlotsPerEpoch
-		requiredRewardsEpoch := rewardsEpoch + 1
-		isRewardsReadyForReport := isRewardsSubmissionDue && (latestFinalizedEpoch >= requiredRewardsEpoch)
-
-		// Run updates and submissions as required
-		if isRewardsReadyForReport {
-			// Check if there's an existing file for this interval, and try submitting that
-			existingRewardsFile, valid, mustRegenerate := t.isExistingRewardsFileValid(headState.NetworkDetails.RewardIndex, intervalsPassed, nodeAddress, isInOdao)
-			if existingRewardsFile != nil {
-				if valid && !mustRegenerate {
-					// We already have a valid file and submission
-					t.log.Printlnf("%s Rewards tree has already been submitted for interval %d and is still valid but consensus hasn't been reached yet; nothing to do.", t.logPrefix, headState.NetworkDetails.RewardIndex)
-					t.lock.Lock()
-					t.isRunning = false
-					t.lock.Unlock()
-					return
-				} else if !valid && !mustRegenerate {
-					// We already have a valid file but need to submit again
-					t.log.Printlnf("%s Rewards tree has already been created for interval %d but hasn't been submitted yet, attempting resubmission.", t.logPrefix, headState.NetworkDetails.RewardIndex)
-				} else if !valid && mustRegenerate {
-					// We have a file but it's not valid (probably because too many intervals have passed)
-					t.log.Printlnf("%s Rewards submission for interval %d is due and current file is no longer valid (likely too many intervals have passed since its creation), regenerating it.", t.logPrefix, headState.NetworkDetails.RewardIndex)
-				}
-			}
-
-			// Get the actual slot to report on
-			var elBlockNumber uint64
-			rewardsSlot, elBlockNumber, err = t.getTrueRewardsIntervalSubmissionSlot(rewardsSlot)
-			if err != nil {
-				t.handleError(fmt.Errorf("error getting the true rewards interval slot: %w", err))
-				return
-			}
-
-			// Get an appropriate client that has access to the target state - this is required if the state gets pruned by the local EC and the
-			// archive EC is required
-			client, err := eth1.GetBestApiClient(t.rp, t.cfg, t.printMessage, big.NewInt(0).SetUint64(elBlockNumber))
-			if err != nil {
-				t.handleError(fmt.Errorf("error getting best API client during rewards submission: %w", err))
-				return
-			}
-
-			// Generate the rewards state
-			stateMgr, err := state.NewNetworkStateManager(client, t.cfg, client.Client, t.bc, &t.log)
-			if err != nil {
-				t.handleError(fmt.Errorf("error creating state manager for rewards slot: %w", err))
-				return
-			}
-			state, err := stateMgr.GetStateForSlot(rewardsSlot)
-			if err != nil {
-				t.handleError(fmt.Errorf("error getting state for rewards slot: %w", err))
-				return
-			}
-
-			// Process the rewards interval
-			t.log.Printlnf("%s Running rewards interval submission.", t.logPrefix)
-			err = t.runRewardsIntervalReport(client, state, isInOdao, intervalsPassed, startTime, endTime, mustRegenerate, existingRewardsFile)
-			if err != nil {
-				t.handleError(fmt.Errorf("error running rewards interval report: %w", err))
-				return
-			}
-		} else {
-			t.log.Printlnf("%s Rewards submission for interval %d is due... waiting for epoch %d to be finalized (currently on epoch %d)", t.logPrefix, headState.NetworkDetails.RewardIndex, requiredRewardsEpoch, latestFinalizedEpoch)
-		}
-
-		t.lock.Lock()
-		t.isRunning = false
-		t.lock.Unlock()
-	}()
-
-	return nil
-}
-
-// Print a message from the tree generation goroutine
-func (t *submitRewardsTree_Rolling) printMessage(message string) {
-	t.log.Printlnf("%s %s", t.logPrefix, message)
-}
-
-// Print an error and unlock the mutex
-func (t *submitRewardsTree_Rolling) handleError(err error) {
-	t.errLog.Printlnf("%s %s", t.logPrefix, err.Error())
-	t.errLog.Println("*** Rolling Record processing failed. ***")
-	t.lock.Lock()
-	t.isRunning = false
-	t.lock.Unlock()
-}
-
-// Check if a rewards interval submission is required and if so, the slot number for the update
-func (t *submitRewardsTree_Rolling) isRewardsIntervalSubmissionRequired(state *state.NetworkState) (bool, uint64, uint64, time.Time, time.Time, error) {
-	// Check if a rewards interval has passed and needs to be calculated
-	startTime := state.NetworkDetails.IntervalStart
-	intervalTime := state.NetworkDetails.IntervalDuration
-
-	// Adjust for the first interval by making the start time the RPL inflation interval start time
-	if startTime == time.Unix(0, 0) {
-		var err error
-		opts := &bind.CallOpts{
-			BlockNumber: big.NewInt(0).SetUint64(state.ElBlockNumber),
-		}
-		startTime, err = tokens.GetRPLInflationIntervalStartTime(t.rp, opts)
-		if err != nil {
-			return false, 0, 0, time.Time{}, time.Time{}, fmt.Errorf("start time is zero, but error getting Rocket Pool deployment block: %w", err)
-		}
-		t.log.Printlnf("NOTE: rewards pool interval start time is 0, using the inflation interval start time according to the RPL token (%s)", startTime.String())
-	}
-
-	// Calculate the end time, which is the number of intervals that have gone by since the current one's start
-	secondsSinceGenesis := time.Duration(state.BeaconConfig.SecondsPerSlot*state.BeaconSlotNumber) * time.Second
-	stateTime := t.genesisTime.Add(secondsSinceGenesis)
-	timeSinceStart := stateTime.Sub(startTime)
-	intervalsPassed := timeSinceStart / intervalTime
-	endTime := startTime.Add(intervalTime * intervalsPassed)
-	if intervalsPassed == 0 {
-		return false, 0, 0, time.Time{}, time.Time{}, nil
-	}
-
-	// Get the target slot number
-	eth2Config := state.BeaconConfig
-	totalTimespan := endTime.Sub(t.genesisTime)
-	targetSlot := uint64(math.Ceil(totalTimespan.Seconds() / float64(eth2Config.SecondsPerSlot)))
-	targetSlotEpoch := targetSlot / eth2Config.SlotsPerEpoch
-	targetSlot = (targetSlotEpoch+1)*eth2Config.SlotsPerEpoch - 1 // The target slot becomes the last one in the Epoch
-
-	return true, targetSlot, uint64(intervalsPassed), startTime, endTime, nil
-}
-
-// Get the actual slot to be used for a rewards interval submission instead of the naively-determined one
-// NOTE: only call this once the required epoch (targetSlotEpoch + 1) has been finalized
-func (t *submitRewardsTree_Rolling) getTrueRewardsIntervalSubmissionSlot(targetSlot uint64) (uint64, uint64, error) {
-	// Get the first successful block
-	for {
-		// Try to get the current block
-		block, exists, err := t.bc.GetBeaconBlock(fmt.Sprint(targetSlot))
-		if err != nil {
-			return 0, 0, fmt.Errorf("error getting Beacon block %d: %w", targetSlot, err)
-		}
-
-		// If the block was missing, try the previous one
-		if !exists {
-			t.log.Printlnf("%s Slot %d was missing, trying the previous one...", t.logPrefix, targetSlot)
-			targetSlot--
-		} else {
-			// Ok, we have the first proposed finalized block - this is the one to use for the snapshot!
-			return targetSlot, block.ExecutionBlockNumber, nil
-		}
-	}
-}
-
-// Checks to see if an existing rewards file is still valid and whether or not it should be regenerated or just resubmitted
-func (t *submitRewardsTree_Rolling) isExistingRewardsFileValid(rewardIndex uint64, intervalsPassed uint64, nodeAddress common.Address, isInOdao bool) (*rprewards.LocalRewardsFile, bool, bool) {
-	rewardsTreePath := t.cfg.Smartnode.GetRewardsTreePath(rewardIndex, true)
-
-	// Check if the rewards file exists
-	_, err := os.Stat(rewardsTreePath)
-	if os.IsNotExist(err) {
-		return nil, false, true
-	}
-	if err != nil {
-		t.log.Printlnf("%s WARNING: failed to check if [%s] exists: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error())
-		return nil, false, true
-	}
-
-	// The file already exists, attempt to read it
-	localRewardsFile, err := rprewards.ReadLocalRewardsFile(rewardsTreePath)
-	if err != nil {
-		t.log.Printlnf("%s WARNING: failed to read %s: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error())
-		return nil, false, true
-	}
-
-	proofWrapper := localRewardsFile.Impl()
-	header := proofWrapper.GetHeader()
-
-	if isInOdao {
-		// Save the compressed file and get the CID for it
-		cid, err := localRewardsFile.CreateCompressedFileAndCid()
-		if err != nil {
-			t.log.Printlnf("%s WARNING: failed to get CID for %s: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error())
-			return nil, false, true
-		}
-
-		// Check if this file has already been submitted
-		submission := rewards.RewardSubmission{
-			RewardIndex:     big.NewInt(0).SetUint64(header.Index),
-			ExecutionBlock:  big.NewInt(0).SetUint64(header.ExecutionEndBlock),
-			ConsensusBlock:  big.NewInt(0).SetUint64(header.ConsensusEndBlock),
-			MerkleRoot:      common.HexToHash(header.MerkleRoot),
-			MerkleTreeCID:   cid.String(),
-			IntervalsPassed: big.NewInt(0).SetUint64(header.IntervalsPassed),
-			TreasuryRPL:     &header.TotalRewards.ProtocolDaoRpl.Int,
-			TrustedNodeRPL:  []*big.Int{&header.TotalRewards.TotalOracleDaoRpl.Int},
-			NodeRPL:         []*big.Int{&header.TotalRewards.TotalCollateralRpl.Int},
-			NodeETH:         []*big.Int{&header.TotalRewards.NodeOperatorSmoothingPoolEth.Int},
-			UserETH:         &header.TotalRewards.PoolStakerSmoothingPoolEth.Int,
-		}
-
-		hasSubmitted, err := rewards.GetTrustedNodeSubmittedSpecificRewards(t.rp, nodeAddress, submission, nil)
-		if err != nil {
-			t.log.Printlnf("%s WARNING: could not check if node has previously submitted file %s: %s; regenerating file...\n", t.logPrefix, rewardsTreePath, err.Error())
-			return nil, false, true
-		}
-		if !hasSubmitted {
-			if header.IntervalsPassed != intervalsPassed {
-				t.log.Printlnf("%s Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...", t.logPrefix, header.Index, header.IntervalsPassed, intervalsPassed)
-				return localRewardsFile, false, true
-			}
-			t.log.Printlnf("%s Existing file for interval %d has not been submitted yet.", t.logPrefix, header.Index)
-			return localRewardsFile, false, false
-		}
-	}
-
-	// Check if the file's valid (same number of intervals passed as the current time)
-	if header.IntervalsPassed != intervalsPassed {
-		t.log.Printlnf("%s Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...", t.logPrefix, header.Index, header.IntervalsPassed, intervalsPassed)
-		return localRewardsFile, false, true
-	}
-
-	// File's good and it has the same number of intervals passed, so use it
-	return localRewardsFile, true, false
-}
-
-// Run a rewards interval report submission
-func (t *submitRewardsTree_Rolling) runRewardsIntervalReport(client *rocketpool.RocketPool, state *state.NetworkState, isInOdao bool, intervalsPassed uint64, startTime time.Time, endTime time.Time, mustRegenerate bool, existingRewardsFile *rprewards.LocalRewardsFile) error {
-	// Prep the record for reporting
-	err := t.recordMgr.PrepareRecordForReport(state)
-	if err != nil {
-		return fmt.Errorf("error preparing record for report: %w", err)
-	}
-
-	// Initialize some variables
-	snapshotBeaconBlock := state.BeaconSlotNumber
-	elBlockNumber := state.ElBlockNumber
-
-	// Get the number of the EL block matching the CL snapshot block
-	snapshotElBlockHeader, err := t.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber)))
-	if err != nil {
-		return err
-	}
-	elBlockIndex := snapshotElBlockHeader.Number.Uint64()
-
-	// Get the current interval
-	currentIndex := state.NetworkDetails.RewardIndex
-	currentIndexBig := big.NewInt(0).SetUint64(currentIndex)
-
-	// Get the expected file paths
-	rewardsTreePath := t.cfg.Smartnode.GetRewardsTreePath(currentIndex, true)
-	compressedRewardsTreePath := rewardsTreePath + config.RewardsTreeIpfsExtension
-	minipoolPerformancePath := t.cfg.Smartnode.GetMinipoolPerformancePath(currentIndex, true)
-	compressedMinipoolPerformancePath := minipoolPerformancePath + config.RewardsTreeIpfsExtension
-
-	// Check if we can reuse an existing file for this interval
-	if !mustRegenerate {
-		if !isInOdao {
-			t.log.Printlnf("%s Node is not in the Oracle DAO, skipping submission for interval %d.", t.logPrefix, currentIndex)
-			return nil
-		}
-
-		t.log.Printlnf("%s Merkle rewards tree for interval %d already exists at %s, attempting to resubmit...", t.logPrefix, currentIndex, rewardsTreePath)
-
-		// Save the compressed file and get the CID for it
-		cid, err := existingRewardsFile.CreateCompressedFileAndCid()
-		if err != nil {
-			return fmt.Errorf("error getting CID for file %s: %w", compressedRewardsTreePath, err)
-		}
-		t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid))
-
-		// Submit to the contracts
-		err = t.submitRewardsSnapshot(currentIndexBig, snapshotBeaconBlock, elBlockIndex, existingRewardsFile.Impl().GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed)))
-		if err != nil {
-			return fmt.Errorf("error submitting rewards snapshot: %w", err)
-		}
-
-		t.log.Printlnf("%s Successfully submitted rewards snapshot for interval %d.", t.logPrefix, currentIndex)
-		return nil
-	}
-
-	// Generate the tree
-	err = t.generateTree(client, state, intervalsPassed, isInOdao, currentIndex, snapshotBeaconBlock, elBlockIndex, startTime, endTime, snapshotElBlockHeader, rewardsTreePath, compressedRewardsTreePath, minipoolPerformancePath, compressedMinipoolPerformancePath)
-	if err != nil {
-		return fmt.Errorf("error generating rewards tree: %w", err)
-	}
-
-	return nil
-}
-
-// Implementation for rewards tree generation using a viable EC
-func (t *submitRewardsTree_Rolling) generateTree(rp *rocketpool.RocketPool, state *state.NetworkState, intervalsPassed uint64, nodeTrusted bool, currentIndex uint64, snapshotBeaconBlock uint64, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header, rewardsTreePath string, compressedRewardsTreePath string, minipoolPerformancePath string, compressedMinipoolPerformancePath string) error {
-
-	// Log
-	if intervalsPassed > 1 {
-		t.log.Printlnf("WARNING: %d intervals have passed since the last rewards checkpoint was submitted! Rolling them into one...", intervalsPassed)
-	}
-	t.log.Printlnf("Rewards checkpoint has passed, starting Merkle tree generation for interval %d in the background.\n%s Snapshot Beacon block = %d, EL block = %d, running from %s to %s", currentIndex, t.logPrefix, snapshotBeaconBlock, elBlockIndex, startTime, endTime)
-
-	// Generate the rewards file
-	treegen, err := rprewards.NewTreeGenerator(&t.log, t.logPrefix, rp, t.cfg, t.bc, currentIndex, startTime, endTime, snapshotBeaconBlock, snapshotElBlockHeader, uint64(intervalsPassed), state, t.recordMgr.Record)
-	if err != nil {
-		return fmt.Errorf("Error creating Merkle tree generator: %w", err)
-	}
-	rewardsFile, err := treegen.GenerateTree()
-	if err != nil {
-		return fmt.Errorf("Error generating Merkle tree: %w", err)
-	}
-	for address, network := range rewardsFile.GetHeader().InvalidNetworkNodes {
-		t.printMessage(fmt.Sprintf("WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", address.Hex(), network))
-	}
-
-	// Serialize the minipool performance file
-	localMinipoolPerformanceFile := rprewards.NewLocalFile[rprewards.IMinipoolPerformanceFile](
-		rewardsFile.GetMinipoolPerformanceFile(),
-		minipoolPerformancePath,
-	)
-	err = localMinipoolPerformanceFile.Write()
-	if err != nil {
-		return fmt.Errorf("Error serializing minipool performance file into JSON: %w", err)
-	}
-
-	if nodeTrusted {
-		minipoolPerformanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid()
-		if err != nil {
-			return fmt.Errorf("Error getting the CID for file %s: %w", compressedMinipoolPerformancePath, err)
-		}
-		t.printMessage(fmt.Sprintf("Calculated minipool performance CID: %s", minipoolPerformanceCid))
-		rewardsFile.SetMinipoolPerformanceFileCID(minipoolPerformanceCid.String())
-	} else {
-		t.printMessage("Saved minipool performance file.")
-		rewardsFile.SetMinipoolPerformanceFileCID("---")
-	}
-
-	// Serialize the rewards tree to JSON
-	localRewardsFile := rprewards.NewLocalFile[rprewards.IRewardsFile](
-		rewardsFile,
-		rewardsTreePath,
-	)
-	t.printMessage("Generation complete! Saving tree...")
-
-	// Write the rewards tree to disk
-	err = localRewardsFile.Write()
-	if err != nil {
-		return fmt.Errorf("Error saving rewards tree file to %s: %w", rewardsTreePath, err)
-	}
-
-	if nodeTrusted {
-		cid, err := localRewardsFile.CreateCompressedFileAndCid()
-		if err != nil {
-			return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePath, err)
-		}
-		t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid))
-
-		// Submit to the contracts
-		err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed)))
-		if err != nil {
-			return fmt.Errorf("Error submitting rewards snapshot: %w", err)
-		}
-
-		t.printMessage(fmt.Sprintf("Successfully submitted rewards snapshot for interval %d.", currentIndex))
-	} else {
-		t.printMessage(fmt.Sprintf("Successfully generated rewards snapshot for interval %d.", currentIndex))
-	}
-
-	return nil
-
-}
-
-// Submit rewards info to the contracts
-func (t *submitRewardsTree_Rolling) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFileHeader *rprewards.RewardsFileHeader, cid string, intervalsPassed *big.Int) error {
-
-	treeRootBytes, err := hex.DecodeString(hexutil.RemovePrefix(rewardsFileHeader.MerkleRoot))
-	if err != nil {
-		return fmt.Errorf("Error decoding merkle root: %w", err)
-	}
-	treeRoot := common.BytesToHash(treeRootBytes)
-
-	// Create the arrays of rewards per network
-	collateralRplRewards := []*big.Int{}
-	oDaoRplRewards := []*big.Int{}
-	smoothingPoolEthRewards := []*big.Int{}
-
-	// Create the total rewards for each network
-	network := uint64(0)
-	for {
-		networkRewards, exists := rewardsFileHeader.NetworkRewards[network]
-		if !exists {
-			break
-		}
-
-		collateralRplRewards = append(collateralRplRewards, &networkRewards.CollateralRpl.Int)
-		oDaoRplRewards = append(oDaoRplRewards, &networkRewards.OracleDaoRpl.Int)
-		smoothingPoolEthRewards = append(smoothingPoolEthRewards, &networkRewards.SmoothingPoolEth.Int)
-
-		network++
-	}
-
-	// Get transactor
-	opts, err := t.w.GetNodeAccountTransactor()
-	if err != nil {
-		return err
-	}
-
-	// Create the submission
-	submission := rewards.RewardSubmission{
-		RewardIndex:     index,
-		ExecutionBlock:  big.NewInt(0).SetUint64(executionBlock),
-		ConsensusBlock:  big.NewInt(0).SetUint64(consensusBlock),
-		MerkleRoot:      treeRoot,
-		MerkleTreeCID:   cid,
-		IntervalsPassed: intervalsPassed,
-		TreasuryRPL:     &rewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int,
-		NodeRPL:         collateralRplRewards,
-		TrustedNodeRPL:  oDaoRplRewards,
-		NodeETH:         smoothingPoolEthRewards,
-		UserETH:         &rewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int,
-	}
-
-	// Get the gas limit
-	gasInfo, err := rewards.EstimateSubmitRewardSnapshotGas(t.rp, submission, opts)
-	if err != nil {
-		if enableSubmissionAfterConsensus_RewardsTree && strings.Contains(err.Error(), "Can only submit snapshot for next period") {
-			// Set a gas limit which will intentionally be too low and revert
-			gasInfo = rocketpool.GasInfo{
-				EstGasLimit:  utils.RewardsSubmissionForcedGas,
-				SafeGasLimit: utils.RewardsSubmissionForcedGas,
-			}
-			t.log.Println("Rewards period consensus has already been reached but submitting anyway for the health check.")
-		} else {
-			return fmt.Errorf("Could not estimate the gas required to submit the rewards tree: %w", err)
-		}
-	}
-
-	// Print the gas info
-	maxFee := eth.GweiToWei(utils.GetWatchtowerMaxFee(t.cfg))
-	if !api.PrintAndCheckGasInfo(gasInfo, false, 0, &t.log, maxFee, 0) {
-		return nil
-	}
-
-	opts.GasFeeCap = maxFee
-	opts.GasTipCap = eth.GweiToWei(utils.GetWatchtowerPrioFee(t.cfg))
-	opts.GasLimit = gasInfo.SafeGasLimit
-
-	// Submit RPL price
-	hash, err := rewards.SubmitRewardSnapshot(t.rp, submission, opts)
-	if err != nil {
-		return err
-	}
-
-	// Print TX info and wait for it to be included in a block
-	err = api.PrintAndWaitForTransaction(t.cfg, hash, t.rp.Client, &t.log)
-	if err != nil {
-		return err
-	}
-
-	// Return
-	return nil
-}
diff --git a/rocketpool/watchtower/submit-rewards-tree-stateless.go b/rocketpool/watchtower/submit-rewards-tree-stateless.go
index ca592644b..5e84089b9 100644
--- a/rocketpool/watchtower/submit-rewards-tree-stateless.go
+++ b/rocketpool/watchtower/submit-rewards-tree-stateless.go
@@ -155,10 +155,12 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network
 	}
 
 	// Get the block and timestamp of the consensus block that best matches the end time
-	snapshotBeaconBlock, elBlockNumber, err := t.getSnapshotConsensusBlock(endTime, state)
+	snapshotEnd, err := t.getSnapshotEnd(endTime, state)
 	if err != nil {
 		return err
 	}
+	snapshotBeaconBlock := snapshotEnd.ConsensusBlock
+	elBlockNumber := snapshotEnd.ExecutionBlock
 
 	// Get the number of the EL block matching the CL snapshot block
 	snapshotElBlockHeader, err := t.ec.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber)))
@@ -181,15 +183,13 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network
 	t.lock.Unlock()
 
 	// Get the expected file paths
-	rewardsTreePath := t.cfg.Smartnode.GetRewardsTreePath(currentIndex, true)
-	compressedRewardsTreePath := rewardsTreePath + config.RewardsTreeIpfsExtension
-	minipoolPerformancePath := t.cfg.Smartnode.GetMinipoolPerformancePath(currentIndex, true)
-	compressedMinipoolPerformancePath := minipoolPerformancePath + config.RewardsTreeIpfsExtension
+	rewardsTreePathJSON := t.cfg.Smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionJSON)
+	compressedRewardsTreePathJSON := rewardsTreePathJSON + config.RewardsTreeIpfsExtension
 
 	// Check if we can reuse an existing file for this interval
-	if t.isExistingRewardsFileValid(rewardsTreePath, uint64(intervalsPassed)) {
+	if t.isExistingRewardsFileValid(rewardsTreePathJSON, uint64(intervalsPassed)) {
 		if !nodeTrusted {
-			t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s.", currentIndex, rewardsTreePath)
+			t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s.", currentIndex, rewardsTreePathJSON)
 			return nil
 		}
 
@@ -202,10 +202,10 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network
 			return nil
 		}
 
-		t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s, attempting to resubmit...", currentIndex, rewardsTreePath)
+		t.log.Printlnf("Merkle rewards tree for interval %d already exists at %s, attempting to resubmit...", currentIndex, rewardsTreePathJSON)
 
 		// Deserialize the file
-		localRewardsFile, err := rprewards.ReadLocalRewardsFile(rewardsTreePath)
+		localRewardsFile, err := rprewards.ReadLocalRewardsFile(rewardsTreePathJSON)
 		if err != nil {
 			return fmt.Errorf("Error reading rewards tree file: %w", err)
 		}
@@ -213,15 +213,15 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network
 		proofWrapper := localRewardsFile.Impl()
 
 		// Save the compressed file and get the CID for it
-		cid, err := localRewardsFile.CreateCompressedFileAndCid()
+		_, cid, err := localRewardsFile.CreateCompressedFileAndCid()
 		if err != nil {
-			return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePath, err)
+			return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePathJSON, err)
 		}
 		t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid))
 
 		// Submit to the contracts
-		err = t.submitRewardsSnapshot(currentIndexBig, snapshotBeaconBlock, elBlockIndex, proofWrapper.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed)))
+		err = t.submitRewardsSnapshot(currentIndexBig, snapshotBeaconBlock, elBlockIndex, proofWrapper, cid.String(), big.NewInt(int64(intervalsPassed)))
 		if err != nil {
 			return fmt.Errorf("Error submitting rewards snapshot: %w", err)
 		}
@@ -231,7 +231,7 @@ func (t *submitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network
 	}
 
 	// Generate the tree
-	t.generateTree(intervalsPassed, nodeTrusted, currentIndex, snapshotBeaconBlock, elBlockIndex, startTime, endTime, snapshotElBlockHeader, rewardsTreePath, compressedRewardsTreePath, minipoolPerformancePath, compressedMinipoolPerformancePath)
+	t.generateTree(intervalsPassed, nodeTrusted, currentIndex, snapshotEnd, elBlockIndex, startTime, endTime, snapshotElBlockHeader)
 
 	// Done
 	return nil
@@ -268,9 +268,12 @@ func (t *submitRewardsTree_Stateless) isExistingRewardsFileValid(rewardsTreePath
 
 	// Compare the number of intervals in it with the current number of intervals
 	proofWrapper := localRewardsFile.Impl()
-	header := proofWrapper.GetHeader()
-	if header.IntervalsPassed != intervalsPassed {
-		t.log.Printlnf("Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...\n", header.Index, header.IntervalsPassed, intervalsPassed)
+	if proofWrapper.GetIntervalsPassed() != intervalsPassed {
+		t.log.Printlnf("Existing file for interval %d had %d intervals passed but %d have passed now, regenerating file...\n",
+			proofWrapper.GetIndex(),
+			proofWrapper.GetIntervalsPassed(),
+			intervalsPassed,
+		)
 		return false
 	}
 
@@ -280,7 +283,7 @@ func (t *submitRewardsTree_Stateless) isExistingRewardsFileValid(rewardsTreePath
 }
 
 // Kick off the tree generation goroutine
-func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotBeaconBlock uint64, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header, rewardsTreePath string, compressedRewardsTreePath string, minipoolPerformancePath string, compressedMinipoolPerformancePath string) {
+func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotEnd *rprewards.SnapshotEnd, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header) {
 
 	go func() {
 		t.lock.Lock()
@@ -295,7 +298,7 @@ func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration
 		}
 
 		// Generate the tree
-		err = t.generateTreeImpl(client, intervalsPassed, nodeTrusted, currentIndex, snapshotBeaconBlock, elBlockIndex, startTime, endTime, snapshotElBlockHeader, rewardsTreePath, compressedRewardsTreePath, minipoolPerformancePath, compressedMinipoolPerformancePath)
+		err = t.generateTreeImpl(client, intervalsPassed, nodeTrusted, currentIndex, snapshotEnd, elBlockIndex, startTime, endTime, snapshotElBlockHeader)
 		if err != nil {
 			t.handleError(err)
 		}
@@ -308,7 +311,8 @@ func (t *submitRewardsTree_Stateless) generateTree(intervalsPassed time.Duration
 }
 
 // Implementation for rewards tree generation using a viable EC
-func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool, intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotBeaconBlock uint64, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header, rewardsTreePath string, compressedRewardsTreePath string, minipoolPerformancePath string, compressedMinipoolPerformancePath string) error {
+func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool, intervalsPassed time.Duration, nodeTrusted bool, currentIndex uint64, snapshotEnd *rprewards.SnapshotEnd, elBlockIndex uint64, startTime time.Time, endTime time.Time, snapshotElBlockHeader *types.Header) error {
+	snapshotBeaconBlock := snapshotEnd.ConsensusBlock
 
 	// Log
 	if uint64(intervalsPassed) > 1 {
@@ -317,10 +321,7 @@ func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool
 	t.log.Printlnf("Rewards checkpoint has passed, starting Merkle tree generation for interval %d in the background.\n%s Snapshot Beacon block = %d, EL block = %d, running from %s to %s", currentIndex, t.generationPrefix, snapshotBeaconBlock, elBlockIndex, startTime, endTime)
 
 	// Create a new state gen manager
-	mgr, err := state.NewNetworkStateManager(rp, t.cfg, rp.Client, t.bc, t.log)
-	if err != nil {
-		return fmt.Errorf("error creating network state manager for EL block %d, Beacon slot %d: %w", elBlockIndex, snapshotBeaconBlock, err)
-	}
+	mgr := state.NewNetworkStateManager(rp, t.cfg.Smartnode.GetStateManagerContracts(), t.bc, t.log)
 
 	// Create a new state for the target block
 	state, err := mgr.GetStateForSlot(snapshotBeaconBlock)
@@ -329,65 +330,34 @@ func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool
 	}
 
 	// Generate the rewards file
-	treegen, err := rprewards.NewTreeGenerator(t.log, t.generationPrefix, rp, t.cfg, t.bc, currentIndex, startTime, endTime, snapshotBeaconBlock, snapshotElBlockHeader, uint64(intervalsPassed), state, nil)
+	treegen, err := rprewards.NewTreeGenerator(t.log, t.generationPrefix, rprewards.NewRewardsExecutionClient(rp), t.cfg, t.bc, currentIndex, startTime, endTime, snapshotEnd, snapshotElBlockHeader, uint64(intervalsPassed), state)
 	if err != nil {
 		return fmt.Errorf("Error creating Merkle tree generator: %w", err)
 	}
-	rewardsFile, err := treegen.GenerateTree()
+	treeResult, err := treegen.GenerateTree()
 	if err != nil {
 		return fmt.Errorf("Error generating Merkle tree: %w", err)
 	}
-	for address, network := range rewardsFile.GetHeader().InvalidNetworkNodes {
+	rewardsFile := treeResult.RewardsFile
+	for address, network := range treeResult.InvalidNetworkNodes {
 		t.printMessage(fmt.Sprintf("WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", address.Hex(), network))
 	}
 
-	// Serialize the minipool performance file
-	localMinipoolPerformanceFile := rprewards.NewLocalFile[rprewards.IMinipoolPerformanceFile](
-		rewardsFile.GetMinipoolPerformanceFile(),
-		minipoolPerformancePath,
-	)
-
-	// Write it to disk
-	err = localMinipoolPerformanceFile.Write()
+	// Save the files
+	t.printMessage("Generation complete! Saving files...")
+	cid, cids, err := treegen.SaveFiles(treeResult, nodeTrusted)
 	if err != nil {
-		return fmt.Errorf("Error saving minipool performance file to %s: %w", minipoolPerformancePath, err)
-	}
-
-	if nodeTrusted {
-		minipoolPerformanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid()
-		if err != nil {
-			return fmt.Errorf("Error getting CID for file %s: %w", compressedMinipoolPerformancePath, err)
-		}
-		t.printMessage(fmt.Sprintf("Calculated minipool performance CID: %s", minipoolPerformanceCid))
-		rewardsFile.SetMinipoolPerformanceFileCID(minipoolPerformanceCid.String())
-	} else {
-		t.printMessage("Saved minipool performance file.")
-		rewardsFile.SetMinipoolPerformanceFileCID("---")
+		return fmt.Errorf("Error writing rewards artifacts to disk: %w", err)
 	}
-
-	// Serialize the rewards tree to JSON
-	localRewardsFile := rprewards.NewLocalFile[rprewards.IRewardsFile](
-		rewardsFile,
-		rewardsTreePath,
-	)
-	t.printMessage("Generation complete! Saving tree...")
-
-	// Write the rewards tree to disk
-	err = localRewardsFile.Write()
-	if err != nil {
-		return fmt.Errorf("Error saving rewards tree file to %s: %w", rewardsTreePath, err)
+	for filename, cid := range cids {
+		t.printMessage(fmt.Sprintf("\t%s - CID %s", filename, cid.String()))
 	}
 
 	if nodeTrusted {
-		// Save the compressed file and get the CID for it
-		cid, err := localRewardsFile.CreateCompressedFileAndCid()
-		if err != nil {
-			return fmt.Errorf("Error getting CID for file %s : %w", rewardsTreePath, err)
-		}
 		t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid))
 
 		// Submit to the contracts
-		err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed)))
+		err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile, cid.String(), big.NewInt(int64(intervalsPassed)))
 		if err != nil {
 			return fmt.Errorf("Error submitting rewards snapshot: %w", err)
 		}
@@ -402,9 +372,9 @@ func (t *submitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool
 }
 
 // Submit rewards info to the contracts
-func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFileHeader *rprewards.RewardsFileHeader, cid string, intervalsPassed *big.Int) error {
+func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFile rprewards.IRewardsFile, cid string, intervalsPassed *big.Int) error {
 
-	treeRootBytes, err := hex.DecodeString(hexutil.RemovePrefix(rewardsFileHeader.MerkleRoot))
+	treeRootBytes, err := hex.DecodeString(hexutil.RemovePrefix(rewardsFile.GetMerkleRoot()))
 	if err != nil {
 		return fmt.Errorf("Error decoding merkle root: %w", err)
 	}
@@ -416,18 +386,11 @@ func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, cons
 	smoothingPoolEthRewards := []*big.Int{}
 
 	// Create the total rewards for each network
-	network := uint64(0)
-	for {
-		networkRewards, exists := rewardsFileHeader.NetworkRewards[network]
-		if !exists {
-			break
-		}
+	for network := uint64(0); rewardsFile.HasRewardsForNetwork(network); network++ {
 
-		collateralRplRewards = append(collateralRplRewards, &networkRewards.CollateralRpl.Int)
-		oDaoRplRewards = append(oDaoRplRewards, &networkRewards.OracleDaoRpl.Int)
-		smoothingPoolEthRewards = append(smoothingPoolEthRewards, &networkRewards.SmoothingPoolEth.Int)
-
-		network++
+		collateralRplRewards = append(collateralRplRewards, rewardsFile.GetNetworkCollateralRpl(network))
+		oDaoRplRewards = append(oDaoRplRewards, rewardsFile.GetNetworkOracleDaoRpl(network))
+		smoothingPoolEthRewards = append(smoothingPoolEthRewards, rewardsFile.GetNetworkSmoothingPoolEth(network))
 	}
 
 	// Get transactor
@@ -444,11 +407,11 @@ func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, cons
 		MerkleRoot:      treeRoot,
 		MerkleTreeCID:   cid,
 		IntervalsPassed: intervalsPassed,
-		TreasuryRPL:     &rewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int,
+		TreasuryRPL:     rewardsFile.GetTotalProtocolDaoRpl(),
 		NodeRPL:         collateralRplRewards,
 		TrustedNodeRPL:  oDaoRplRewards,
 		NodeETH:         smoothingPoolEthRewards,
-		UserETH:         &rewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int,
+		UserETH:         rewardsFile.GetTotalPoolStakerSmoothingPoolEth(),
 	}
 
 	// Get the gas limit
@@ -493,12 +456,12 @@ func (t *submitRewardsTree_Stateless) submitRewardsSnapshot(index *big.Int, cons
 }
 
 // Get the first finalized, successful consensus block that occurred after the given target time
-func (t *submitRewardsTree_Stateless) getSnapshotConsensusBlock(endTime time.Time, state *state.NetworkState) (uint64, uint64, error) {
+func (t *submitRewardsTree_Stateless) getSnapshotEnd(endTime time.Time, state *state.NetworkState) (*rprewards.SnapshotEnd, error) {
 
 	// Get the beacon head
 	beaconHead, err := t.bc.GetBeaconHead()
 	if err != nil {
-		return 0, 0, fmt.Errorf("Error getting Beacon head: %w", err)
+		return nil, fmt.Errorf("Error getting Beacon head: %w", err)
 	}
 
 	// Get the target block number
@@ -512,7 +475,11 @@ func (t *submitRewardsTree_Stateless) getSnapshotEnd(endTime time.Time, state *s
 
 	// Check if the required epoch is finalized yet
 	if beaconHead.FinalizedEpoch < requiredEpoch {
-		return 0, 0, fmt.Errorf("Snapshot end time = %s, slot (epoch) = %d (%d)... waiting until epoch %d is finalized (currently %d).", endTime, targetSlot, targetSlotEpoch, requiredEpoch, beaconHead.FinalizedEpoch)
+		return nil, fmt.Errorf("Snapshot end time = %s, slot (epoch) = %d (%d)... waiting until epoch %d is finalized (currently %d).", endTime, targetSlot, targetSlotEpoch, requiredEpoch, beaconHead.FinalizedEpoch)
+	}
+
+	out := &rprewards.SnapshotEnd{
+		Slot: targetSlot,
 	}
 
 	// Get the first successful block
@@ -520,19 +487,23 @@ func (t *submitRewardsTree_Stateless) getSnapshotEnd(endTime time.Time, state *s
 		// Try to get the current block
 		block, exists, err := t.bc.GetBeaconBlock(fmt.Sprint(targetSlot))
 		if err != nil {
-			return 0, 0, fmt.Errorf("Error getting Beacon block %d: %w", targetSlot, err)
+			return nil, fmt.Errorf("Error getting Beacon block %d: %w", targetSlot, err)
 		}
 
 		// If the block was missing, try the previous one
 		if !exists {
 			t.log.Printlnf("Slot %d was missing, trying the previous one...", targetSlot)
 			targetSlot--
-		} else {
-			// Ok, we have the first proposed finalized block - this is the one to use for the snapshot!
-			return targetSlot, block.ExecutionBlockNumber, nil
+			continue
 		}
+
+		// Ok, we have the first proposed finalized block - this is the one to use for the snapshot!
+		out.ConsensusBlock = targetSlot
+		out.ExecutionBlock = block.ExecutionBlockNumber
+		break
 	}
+	return out, nil
 }
 
 // Check whether the rewards tree for the current interval been submitted by the node
diff --git a/rocketpool/watchtower/watchtower.go b/rocketpool/watchtower/watchtower.go
index ae5739385..c8f63bff6 100644
--- a/rocketpool/watchtower/watchtower.go
+++ b/rocketpool/watchtower/watchtower.go
@@ -95,12 +95,6 @@ func run(c *cli.Context) error {
 		fmt.Println("Starting watchtower daemon in Docker Mode.")
 	}
 
-	// Check if rolling records are enabled
-	useRollingRecords := cfg.Smartnode.UseRollingRecords.Value.(bool)
-	if useRollingRecords {
-		fmt.Println("***NOTE: EXPERIMENTAL ROLLING RECORDS ARE ENABLED, BE ADVISED!***")
-	}
-
 	// Initialize the metrics reporters
 	scrubCollector := collectors.NewScrubCollector()
 	bondReductionCollector := collectors.NewBondReductionCollector()
@@ -111,10 +105,7 @@ func run(c *cli.Context) error {
 	updateLog := log.NewColorLogger(UpdateColor)
 
 	// Create the state manager
-	m, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bc, &updateLog)
-	if err != nil {
-		return err
-	}
+	m := state.NewNetworkStateManager(rp, cfg.Smartnode.GetStateManagerContracts(), bc, &updateLog)
 
 	// Get the node address
 	nodeAccount, err := w.GetNodeAccount()
@@ -144,17 +135,9 @@ func run(c *cli.Context) error {
 		return fmt.Errorf("error during scrub check: %w", err)
 	}
 	var submitRewardsTree_Stateless *submitRewardsTree_Stateless
-	var submitRewardsTree_Rolling *submitRewardsTree_Rolling
-	if !useRollingRecords {
-		submitRewardsTree_Stateless, err = newSubmitRewardsTree_Stateless(c, log.NewColorLogger(SubmitRewardsTreeColor), errorLog, m)
-		if err != nil {
-			return fmt.Errorf("error during stateless rewards tree check: %w", err)
-		}
-	} else {
-		submitRewardsTree_Rolling, err = newSubmitRewardsTree_Rolling(c, log.NewColorLogger(SubmitRewardsTreeColor), errorLog, m)
-		if err != nil {
-			return fmt.Errorf("error during rolling rewards tree check: %w", err)
-		}
+	submitRewardsTree_Stateless, err = newSubmitRewardsTree_Stateless(c, log.NewColorLogger(SubmitRewardsTreeColor), errorLog, m)
+	if err != nil {
+		return fmt.Errorf("error during stateless rewards tree check: %w", err)
 	}
 	/*processPenalties, err := newProcessPenalties(c, log.NewColorLogger(ProcessPenaltiesColor), errorLog)
 	if err != nil {
@@ -251,19 +234,11 @@ func run(c *cli.Context) error {
 			}
 			time.Sleep(taskCooldown)
 
-			if !useRollingRecords {
-				// Run the rewards tree submission check
-				if err := submitRewardsTree_Stateless.Run(isOnOdao, state, latestBlock.Slot); err != nil {
-					errorLog.Println(err)
-				}
-				time.Sleep(taskCooldown)
-			} else {
-				// Run the network balance and rewards tree submission check
-				if err := submitRewardsTree_Rolling.run(state); err != nil {
-					errorLog.Println(err)
-				}
-				time.Sleep(taskCooldown)
+			// Run the rewards tree submission check
+			if err := submitRewardsTree_Stateless.Run(isOnOdao, state, latestBlock.Slot); err != nil {
+				errorLog.Println(err)
 			}
+			time.Sleep(taskCooldown)
 
 			// Run the price submission check
 			if err := submitRplPrice.run(state); err != nil {
@@ -307,18 +282,9 @@ func run(c *cli.Context) error {
 
 				}*/ // DISABLED until MEV-Boost can support it
 			} else {
-				/*
-				 */
-				if !useRollingRecords {
-					// Run the rewards tree submission check
-					if err := submitRewardsTree_Stateless.Run(isOnOdao, nil, latestBlock.Slot); err != nil {
-						errorLog.Println(err)
errorLog.Println(err) - } + // Run the rewards tree submission check + if err := submitRewardsTree_Stateless.Run(isOnOdao, nil, latestBlock.Slot); err != nil { + errorLog.Println(err) } } diff --git a/shared/services/bc-manager.go b/shared/services/bc-manager.go index 17f01147a..fda19834b 100644 --- a/shared/services/bc-manager.go +++ b/shared/services/bc-manager.go @@ -2,6 +2,7 @@ package services import ( "fmt" + "math/big" "strings" "github.com/ethereum/go-ethereum/common" @@ -308,6 +309,28 @@ func (m *BeaconClientManager) ChangeWithdrawalCredentials(validatorIndex string, return nil } +// Get the validator balances for a set of validators at a given slot, with backoff. +func (m *BeaconClientManager) GetValidatorBalancesSafe(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + result, err := m.runFunction1(func(client beacon.Client) (interface{}, error) { + return client.GetValidatorBalancesSafe(indices, opts) + }) + if err != nil { + return nil, err + } + return result.(map[string]*big.Int), nil +} + +// Get the validator balances for a set of validators at a given slot +func (m *BeaconClientManager) GetValidatorBalances(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + result, err := m.runFunction1(func(client beacon.Client) (interface{}, error) { + return client.GetValidatorBalances(indices, opts) + }) + if err != nil { + return nil, err + } + return result.(map[string]*big.Int), nil +} + /// ================== /// Internal Functions /// ================== diff --git a/shared/services/beacon/client.go b/shared/services/beacon/client.go index f60b6a943..353bb7c15 100644 --- a/shared/services/beacon/client.go +++ b/shared/services/beacon/client.go @@ -1,6 +1,8 @@ package beacon import ( + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/prysmaticlabs/go-bitfield" "github.com/rocket-pool/rocketpool-go/types" @@ -17,16 +19,6 @@ type SyncStatus struct { Syncing bool Progress float64 } -type Eth2Config struct { - GenesisForkVersion []byte - GenesisValidatorsRoot []byte - GenesisEpoch uint64 - GenesisTime uint64 - SecondsPerSlot uint64 - SlotsPerEpoch uint64 - SecondsPerEpoch uint64 - EpochsPerSyncCommitteePeriod uint64 -} type Eth2DepositContract struct { ChainID uint64 Address common.Address @@ -38,24 +30,29 @@ type BeaconHead struct { PreviousJustifiedEpoch uint64 } type ValidatorStatus struct { - Pubkey types.ValidatorPubkey - Index string - WithdrawalCredentials common.Hash - Balance uint64 - Status ValidatorState - EffectiveBalance uint64 - Slashed bool - ActivationEligibilityEpoch uint64 - ActivationEpoch uint64 - ExitEpoch uint64 - WithdrawableEpoch uint64 - Exists bool + Pubkey types.ValidatorPubkey `json:"pubkey"` + Index string `json:"index"` + WithdrawalCredentials common.Hash `json:"withdrawal_credentials"` + Balance uint64 `json:"balance"` + Status ValidatorState `json:"status"` + EffectiveBalance uint64 `json:"effective_balance"` + Slashed bool `json:"slashed"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch"` + ActivationEpoch uint64 `json:"activation_epoch"` + ExitEpoch uint64 `json:"exit_epoch"` + WithdrawableEpoch uint64 `json:"withdrawable_epoch"` + Exists bool `json:"exists"` } type Eth1Data struct { DepositRoot common.Hash DepositCount uint64 BlockHash common.Hash } +type WithdrawalInfo struct { + ValidatorIndex string + Address common.Address + Amount *big.Int +} type BeaconBlock struct { Slot uint64 ProposerIndex string @@ -63,6 +60,7 @@ type BeaconBlock 
struct { Attestations []AttestationInfo FeeRecipient common.Address ExecutionBlockNumber uint64 + Withdrawals []WithdrawalInfo } type BeaconBlockHeader struct { Slot uint64 @@ -146,6 +144,8 @@ type Client interface { GetValidatorIndex(pubkey types.ValidatorPubkey) (string, error) GetValidatorSyncDuties(indices []string, epoch uint64) (map[string]bool, error) GetValidatorProposerDuties(indices []string, epoch uint64) (map[string]uint64, error) + GetValidatorBalances(indices []string, opts *ValidatorStatusOptions) (map[string]*big.Int, error) + GetValidatorBalancesSafe(indices []string, opts *ValidatorStatusOptions) (map[string]*big.Int, error) GetDomainData(domainType []byte, epoch uint64, useGenesisFork bool) ([]byte, error) ExitValidator(validatorIndex string, epoch uint64, signature types.ValidatorSignature) error Close() error diff --git a/shared/services/beacon/client/std-http-client.go b/shared/services/beacon/client/std-http-client.go index a8729ec21..ad3ed1a50 100644 --- a/shared/services/beacon/client/std-http-client.go +++ b/shared/services/beacon/client/std-http-client.go @@ -5,10 +5,13 @@ import ( "encoding/hex" "fmt" "io" + "math/big" "net/http" + "slices" "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -36,6 +39,7 @@ const ( RequestFinalityCheckpointsPath = "/eth/v1/beacon/states/%s/finality_checkpoints" RequestForkPath = "/eth/v1/beacon/states/%s/fork" RequestValidatorsPath = "/eth/v1/beacon/states/%s/validators" + RequestValidatorBalancesPath = "/eth/v1/beacon/states/%s/validator_balances" RequestVoluntaryExitPath = "/eth/v1/beacon/pool/voluntary_exits" RequestAttestationsPath = "/eth/v1/beacon/blocks/%s/attestations" RequestBeaconBlockPath = "/eth/v2/beacon/blocks/%s" @@ -90,9 +94,18 @@ func (c *StandardHttpClient) GetSyncStatus() (beacon.SyncStatus, error) { } +var eth2ConfigCache atomic.Pointer[beacon.Eth2Config] + // Get the eth2 config +// cache it for future requests func (c *StandardHttpClient) GetEth2Config() (beacon.Eth2Config, error) { + // Check the cache + cached := eth2ConfigCache.Load() + if cached != nil { + return *cached, nil + } + // Data var wg errgroup.Group var eth2Config Eth2ConfigResponse @@ -117,8 +130,8 @@ func (c *StandardHttpClient) GetEth2Config() (beacon.Eth2Config, error) { return beacon.Eth2Config{}, err } - // Return response - return beacon.Eth2Config{ + // Save the result + out := beacon.Eth2Config{ GenesisForkVersion: genesis.Data.GenesisForkVersion, GenesisValidatorsRoot: genesis.Data.GenesisValidatorsRoot, GenesisEpoch: 0, @@ -127,8 +140,11 @@ func (c *StandardHttpClient) GetEth2Config() (beacon.Eth2Config, error) { SlotsPerEpoch: uint64(eth2Config.Data.SlotsPerEpoch), SecondsPerEpoch: uint64(eth2Config.Data.SecondsPerSlot * eth2Config.Data.SlotsPerEpoch), EpochsPerSyncCommitteePeriod: uint64(eth2Config.Data.EpochsPerSyncCommitteePeriod), - }, nil + } + eth2ConfigCache.Store(&out) + // Return + return out, nil } // Get the eth2 deposit contract info @@ -231,6 +247,114 @@ func (c *StandardHttpClient) getValidatorStatus(pubkeyOrIndex string, opts *beac } +// Get multiple validators' balances +func (c *StandardHttpClient) GetValidatorBalances(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + + // Get state ID + var stateId string + if opts == nil { + stateId = "head" + } else if opts.Slot != nil { + stateId = strconv.FormatInt(int64(*opts.Slot), 10) + } else if opts.Epoch != nil { + + // Get eth2 config + eth2Config, err := c.getEth2Config() + if err != nil { 
+ return nil, err + } + + // Get slot number + slot := *opts.Epoch * uint64(eth2Config.Data.SlotsPerEpoch) + stateId = strconv.FormatInt(int64(slot), 10) + + } else { + return nil, fmt.Errorf("must specify a slot or epoch when calling GetValidatorBalances") + } + + count := len(indices) + data := make(map[string]*big.Int, count) + for i := 0; i < count; i += MaxRequestValidatorsCount { + i := i + max := i + MaxRequestValidatorsCount + if max > count { + max = count + } + + // Get & add balances + batch := indices[i:max] + balances, err := c.getValidatorBalances(stateId, batch) + if err != nil { + return nil, fmt.Errorf("error getting validator balances: %w", err) + } + for _, balance := range balances.Data { + b, ok := big.NewInt(0).SetString(balance.Balance, 10) + if !ok { + return nil, fmt.Errorf("invalid balance: %s", balance.Balance) + } + // Beacon clients return Gwei, but we want wei + b.Mul(b, big.NewInt(1e9)) + + data[balance.Index] = b + } + } + + // Return + return data, nil +} + +// GetValidatorBalancesSafe returns the balances of the validators. +// To avoid thrashing the bn, we preflight the balance query with a sync-status query, +// ensuring that the bn has not entered optimistic sync due to being unable to provide forkchoice updates, +// and that the current head is a recent slot. +func (c *StandardHttpClient) GetValidatorBalancesSafe(indices []string, opts *beacon.ValidatorStatusOptions) (map[string]*big.Int, error) { + // Filter out empty indices + indices = slices.DeleteFunc(indices, func(index string) bool { + return index == "" + }) + + beaconConfig, err := c.GetEth2Config() + if err != nil { + return nil, err + } + // Check the current head + safe := false + for i := 0; i < 30; i++ { + syncStatus, err := c.getSyncStatus() + if err != nil { + // If we get an error, wait and try again + time.Sleep(1 * time.Second) + continue + } + if syncStatus.Data.IsSyncing { + // If the bn is still syncing, wait and try again + time.Sleep(1 * time.Second) + continue + } + if syncStatus.Data.ELOffline { + // If the execution client is offline, wait and try again + time.Sleep(1 * time.Second) + continue + } + // Check that the head is no more than 2 slots behind the current time.
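// As a concrete sketch of the check below (assuming mainnet's 12-second slots):
// if slotTime(headSlot) + 2*12s is still before time.Now(), the reported head is
// more than two slots stale, so the loop sleeps a second and retries.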
+ if beaconConfig.GetSlotTime(uint64(syncStatus.Data.HeadSlot)).Add(2 * time.Second * time.Duration(beaconConfig.SecondsPerSlot)).Before(time.Now()) { + // If the head is too far behind, wait and try again + time.Sleep(1 * time.Second) + continue + } + + safe = true + break + } + if !safe { + return nil, fmt.Errorf("bn is not in sync after 30 seconds") + } + + // Get the balances + return c.GetValidatorBalances(indices, opts) +} + // Get multiple validators' statuses func (c *StandardHttpClient) GetValidatorStatuses(pubkeys []types.ValidatorPubkey, opts *beacon.ValidatorStatusOptions) (map[types.ValidatorPubkey]beacon.ValidatorStatus, error) { @@ -526,6 +650,7 @@ func (c *StandardHttpClient) GetBeaconBlock(blockId string) (beacon.BeaconBlock, } // Add attestation info + beaconBlock.Attestations = make([]beacon.AttestationInfo, 0, len(block.Data.Message.Body.Attestations)) for i, attestation := range block.Data.Message.Body.Attestations { bitString := hexutil.RemovePrefix(attestation.AggregationBits) info := beacon.AttestationInfo{ @@ -539,6 +664,22 @@ func (c *StandardHttpClient) GetBeaconBlock(blockId string) (beacon.BeaconBlock, beaconBlock.Attestations = append(beaconBlock.Attestations, info) } + // Add withdrawals + beaconBlock.Withdrawals = make([]beacon.WithdrawalInfo, 0, len(block.Data.Message.Body.ExecutionPayload.Withdrawals)) + for _, withdrawal := range block.Data.Message.Body.ExecutionPayload.Withdrawals { + amount, ok := new(big.Int).SetString(withdrawal.Amount, 10) + if !ok { + return beacon.BeaconBlock{}, false, fmt.Errorf("Error decoding withdrawal amount for address %s in block %s: %s", withdrawal.Address, blockId, withdrawal.Amount) + } + // amount is in Gwei, but we want wei + amount.Mul(amount, big.NewInt(1e9)) + beaconBlock.Withdrawals = append(beaconBlock.Withdrawals, beacon.WithdrawalInfo{ + ValidatorIndex: withdrawal.ValidatorIndex, + Address: common.BytesToAddress(withdrawal.Address), + Amount: amount, + }) + } + return beaconBlock, true, nil } @@ -676,6 +817,26 @@ func (c *StandardHttpClient) getFork(stateId string) (ForkResponse, error) { return fork, nil } +// Get validator balances +func (c *StandardHttpClient) getValidatorBalances(stateId string, indices []string) (ValidatorBalancesResponse, error) { + var query string + if len(indices) > 0 { + query = fmt.Sprintf("?id=%s", strings.Join(indices, ",")) + } + responseBody, status, err := c.getRequest(fmt.Sprintf(RequestValidatorBalancesPath, stateId) + query) + if err != nil { + return ValidatorBalancesResponse{}, fmt.Errorf("Could not get validator balances: %w", err) + } + if status != http.StatusOK { + return ValidatorBalancesResponse{}, fmt.Errorf("Could not get validator balances: HTTP status %d; response body: '%s'", status, string(responseBody)) + } + var balances ValidatorBalancesResponse + if err := json.Unmarshal(responseBody, &balances); err != nil { + return ValidatorBalancesResponse{}, fmt.Errorf("Could not decode validator balances: %w", err) + } + return balances, nil +} + // Get validators func (c *StandardHttpClient) getValidators(stateId string, pubkeys []string) (ValidatorsResponse, error) { var query string diff --git a/shared/services/beacon/client/types.go b/shared/services/beacon/client/types.go index e14d0073a..00882fe0a 100644 --- a/shared/services/beacon/client/types.go +++ b/shared/services/beacon/client/types.go @@ -32,9 +32,11 @@ type BLSToExecutionChangeRequest struct { // Response types type SyncStatusResponse struct { Data struct { - IsSyncing bool
`json:"is_syncing"` HeadSlot uinteger `json:"head_slot"` SyncDistance uinteger `json:"sync_distance"` + IsSyncing bool `json:"is_syncing"` + IsOptimistic bool `json:"is_optimistic"` + ELOffline bool `json:"el_offline"` } `json:"data"` } type Eth2ConfigResponse struct { @@ -94,8 +96,9 @@ type BeaconBlockResponse struct { } `json:"eth1_data"` Attestations []Attestation `json:"attestations"` ExecutionPayload *struct { - FeeRecipient byteArray `json:"fee_recipient"` - BlockNumber uinteger `json:"block_number"` + FeeRecipient byteArray `json:"fee_recipient"` + BlockNumber uinteger `json:"block_number"` + Withdrawals []Withdrawal `json:"withdrawals"` } `json:"execution_payload"` } `json:"body"` } `json:"message"` @@ -114,6 +117,12 @@ type BeaconBlockHeaderResponse struct { } `json:"header"` } `json:"data"` } +type ValidatorBalancesResponse struct { + Data []struct { + Index string `json:"index"` + Balance string `json:"balance"` + } `json:"data"` +} type ValidatorsResponse struct { Data []Validator `json:"data"` } @@ -159,6 +168,13 @@ type Attestation struct { } `json:"data"` } +type Withdrawal struct { + Index string `json:"index"` + ValidatorIndex string `json:"validator_index"` + Address byteArray `json:"address"` + Amount string `json:"amount"` +} + // Unsigned integer type type uinteger uint64 diff --git a/shared/services/beacon/config.go b/shared/services/beacon/config.go new file mode 100644 index 000000000..fd0299fa5 --- /dev/null +++ b/shared/services/beacon/config.go @@ -0,0 +1,126 @@ +package beacon + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +type Eth2Config struct { + GenesisForkVersion []byte `json:"genesis_fork_version"` + GenesisValidatorsRoot []byte `json:"genesis_validators_root"` + GenesisEpoch uint64 `json:"genesis_epoch"` + GenesisTime uint64 `json:"genesis_time"` + SecondsPerSlot uint64 `json:"seconds_per_slot"` + SlotsPerEpoch uint64 `json:"slots_per_epoch"` + SecondsPerEpoch uint64 `json:"seconds_per_epoch"` + EpochsPerSyncCommitteePeriod uint64 `json:"epochs_per_sync_committee_period"` +} + +func (c *Eth2Config) MarshalJSON() ([]byte, error) { + // GenesisForkVersion and GenesisValidatorsRoot are returned as hex strings with 0x prefixes. + // The other fields are returned as uint64s. + type Alias Eth2Config + return json.Marshal(&struct { + GenesisForkVersion string `json:"genesis_fork_version"` + GenesisValidatorsRoot string `json:"genesis_validators_root"` + *Alias + }{ + GenesisForkVersion: hexutil.Encode(c.GenesisForkVersion), + GenesisValidatorsRoot: hexutil.Encode(c.GenesisValidatorsRoot), + Alias: (*Alias)(c), + }) +} + +func (c *Eth2Config) UnmarshalJSON(data []byte) error { + type Alias Eth2Config + aux := &struct { + GenesisForkVersion string `json:"genesis_fork_version"` + GenesisValidatorsRoot string `json:"genesis_validators_root"` + *Alias + }{ + Alias: (*Alias)(c), + } + + err := json.Unmarshal(data, &aux) + if err != nil { + return err + } + + c.GenesisForkVersion, err = hexutil.Decode(aux.GenesisForkVersion) + if err != nil { + return err + } + c.GenesisValidatorsRoot, err = hexutil.Decode(aux.GenesisValidatorsRoot) + if err != nil { + return err + } + return nil +} + +// GetSlotTime returns the time of a given slot for the network described by Eth2Config. 
+func (c *Eth2Config) GetSlotTime(slot uint64) time.Time { + // In the interest of keeping this pure, we'll just return genesis time for slots before genesis + if slot <= c.GenesisEpoch*c.SlotsPerEpoch { + return time.Unix(int64(c.GenesisTime), 0) + } + // Genesis is slot 0 on mainnet, so we can subtract it safely + slotsSinceGenesis := slot - (c.GenesisEpoch * c.SlotsPerEpoch) + return time.Unix(int64(slotsSinceGenesis*c.SecondsPerSlot+c.GenesisTime), 0) +} + +// FirstSlotAtLeast returns the first slot with a timestamp greater than or equal to t +func (c *Eth2Config) FirstSlotAtLeast(t int64) uint64 { + if t <= 0 { + return c.GenesisEpoch * c.SlotsPerEpoch + } + + if uint64(t) <= c.GenesisTime { + return c.GenesisEpoch * c.SlotsPerEpoch + } + + secondsSinceGenesis := uint64(t) - c.GenesisTime + + var slotsSinceGenesis uint64 + // Avoid rounding up on an exact multiple with a modulo check + if secondsSinceGenesis%c.SecondsPerSlot == 0 { + slotsSinceGenesis = secondsSinceGenesis / c.SecondsPerSlot + } else { + // There must be a remainder + slotsSinceGenesis = secondsSinceGenesis/c.SecondsPerSlot + 1 + } + return c.GenesisEpoch*c.SlotsPerEpoch + slotsSinceGenesis +} + +func (c *Eth2Config) SlotToEpoch(slot uint64) uint64 { + return slot / c.SlotsPerEpoch +} + +func (c *Eth2Config) EpochToSlot(epoch uint64) uint64 { + return epoch * c.SlotsPerEpoch +} + +func (c *Eth2Config) SlotOfEpoch(epoch uint64, slot uint64) (uint64, error) { + if slot > c.SlotsPerEpoch-1 { + return 0, fmt.Errorf("slot %d is not in range 0 - %d", slot, c.SlotsPerEpoch-1) + } + return epoch*c.SlotsPerEpoch + slot, nil +} + +func (c *Eth2Config) LastSlotOfEpoch(epoch uint64) uint64 { + out, err := c.SlotOfEpoch(epoch, c.SlotsPerEpoch-1) + if err != nil { + panic("SlotOfEpoch should never return an error when passed SlotsPerEpoch - 1") + } + return out +} + +func (c *Eth2Config) FirstSlotOfEpoch(epoch uint64) uint64 { + out, err := c.SlotOfEpoch(epoch, 0) + if err != nil { + panic("SlotOfEpoch should never return an error when passed 0") + } + return out +} diff --git a/shared/services/beacon/config_test.go b/shared/services/beacon/config_test.go new file mode 100644 index 000000000..e0af148de --- /dev/null +++ b/shared/services/beacon/config_test.go @@ -0,0 +1,110 @@ +package beacon + +import ( + "slices" + "testing" + "time" +) + +var config = &Eth2Config{ + GenesisEpoch: 10, + GenesisTime: 10000, + SecondsPerSlot: 4, + SlotsPerEpoch: 32, + SecondsPerEpoch: 32 * 4, +} + +func TestGetSlotTime(t *testing.T) { + genesis := config.GetSlotTime(0) + if !genesis.Equal(time.Unix(int64(config.GenesisTime), 0)) { + t.Fatalf("slot 0 should be at genesis (%d) but was at %s", config.GenesisTime, genesis) + } + + slotPlusTen := config.GenesisEpoch*config.SlotsPerEpoch + 10 + slotPlusTenTime := config.GetSlotTime(slotPlusTen) + expectedTime := time.Unix(int64(config.SecondsPerSlot*10+config.GenesisTime), 0) + if !slotPlusTenTime.Equal(expectedTime) { + t.Fatalf("slot +10 should be at %s but was at %s", expectedTime, slotPlusTenTime) + } +} + +func TestFirstSlotAtLeast(t *testing.T) { + genesis := config.FirstSlotAtLeast(30) + if genesis != config.GenesisEpoch*config.SlotsPerEpoch { + t.Fatalf("should have gotten the genesis slot (%d), instead got %d", config.GenesisEpoch*config.SlotsPerEpoch, genesis) + } + + // Whole multiple + slots := uint64(9000000) + st := config.GenesisTime + config.SecondsPerSlot*slots + result := config.FirstSlotAtLeast(int64(st)) + if result != slots+config.GenesisEpoch*config.SlotsPerEpoch +{
t.Fatal("Whole number seconds shouldn't round up") + } + + // Partial multiple rounds up + st = config.GenesisTime + config.SecondsPerSlot*slots - config.SecondsPerSlot/2 + result = config.FirstSlotAtLeast(int64(st)) + if result != slots+config.GenesisEpoch*config.SlotsPerEpoch { + t.Fatal("Whole number seconds shouldn't round up") + } + + // Smallest fractional amount rounds up + st = config.GenesisTime + config.SecondsPerSlot*slots - config.SecondsPerSlot + 1 + result = config.FirstSlotAtLeast(int64(st)) + if result != slots+config.GenesisEpoch*config.SlotsPerEpoch { + t.Fatal("Whole number seconds shouldn't round up") + } +} + +func TestMarshalJSON(t *testing.T) { + config := &Eth2Config{ + GenesisForkVersion: []byte{0x00, 0x00, 0x00, 0x08}, + GenesisValidatorsRoot: []byte{0xfe, 0x44, 0x33, 0x22}, + GenesisEpoch: 10, + GenesisTime: 10000, + SecondsPerSlot: 4, + SlotsPerEpoch: 32, + SecondsPerEpoch: 32 * 4, + EpochsPerSyncCommitteePeriod: 256, + } + + json, err := config.MarshalJSON() + if err != nil { + t.Fatalf("error marshalling config: %v", err) + } + + unmarshalled := &Eth2Config{} + err = unmarshalled.UnmarshalJSON(json) + if err != nil { + t.Fatalf("error unmarshalling config: %v", err) + } + + if !slices.Equal(unmarshalled.GenesisForkVersion, config.GenesisForkVersion) { + t.Fatalf("genesis fork version should be %v, instead got %v", config.GenesisForkVersion, unmarshalled.GenesisForkVersion) + } + + if !slices.Equal(unmarshalled.GenesisValidatorsRoot, config.GenesisValidatorsRoot) { + t.Fatalf("genesis validators root should be %v, instead got %v", config.GenesisValidatorsRoot, unmarshalled.GenesisValidatorsRoot) + } + + if unmarshalled.GenesisEpoch != config.GenesisEpoch { + t.Fatalf("genesis epoch should be %v, instead got %v", config.GenesisEpoch, unmarshalled.GenesisEpoch) + } + + if unmarshalled.GenesisTime != config.GenesisTime { + t.Fatalf("genesis time should be %v, instead got %v", config.GenesisTime, unmarshalled.GenesisTime) + } + + if unmarshalled.SecondsPerSlot != config.SecondsPerSlot { + t.Fatalf("seconds per slot should be %v, instead got %v", config.SecondsPerSlot, unmarshalled.SecondsPerSlot) + } + + if unmarshalled.SlotsPerEpoch != config.SlotsPerEpoch { + t.Fatalf("slots per epoch should be %v, instead got %v", config.SlotsPerEpoch, unmarshalled.SlotsPerEpoch) + } + + if unmarshalled.EpochsPerSyncCommitteePeriod != config.EpochsPerSyncCommitteePeriod { + t.Fatalf("epochs per sync committee period should be %v, instead got %v", config.EpochsPerSyncCommitteePeriod, unmarshalled.EpochsPerSyncCommitteePeriod) + } +} diff --git a/shared/services/config/smartnode-config.go b/shared/services/config/smartnode-config.go index f04aaf3ec..6c01c042b 100644 --- a/shared/services/config/smartnode-config.go +++ b/shared/services/config/smartnode-config.go @@ -18,8 +18,8 @@ const ( NetworkID string = "network" ProjectNameID string = "projectName" SnapshotID string = "rocketpool-dao.eth" - RewardsTreeFilenameFormat string = "rp-rewards-%s-%d.json" - MinipoolPerformanceFilenameFormat string = "rp-minipool-performance-%s-%d.json" + rewardsTreeFilenameFormat string = "rp-rewards-%s-%d%s" + minipoolPerformanceFilenameFormat string = "rp-minipool-performance-%s-%d%s" RewardsTreeIpfsExtension string = ".zst" RewardsTreesFolder string = "rewards-trees" ChecksumTableFilename string = "checksums.sha384" @@ -42,6 +42,19 @@ const ( WatchtowerPrioFeeDefault uint64 = 3 ) +type RewardsExtension string + +const ( + RewardsExtensionJSON RewardsExtension = ".json" + RewardsExtensionSSZ 
RewardsExtension = ".ssz" +) + +// Contract addresses for multicall / network state manager +type StateManagerContracts struct { + Multicaller common.Address + BalanceBatcher common.Address +} + // Configuration for the Smartnode type SmartnodeConfig struct { Title string `yaml:"-"` @@ -95,18 +108,6 @@ type SmartnodeConfig struct { // Manual override for the watchtower's priority fee WatchtowerPrioFeeOverride config.Parameter `yaml:"watchtowerPrioFeeOverride,omitempty"` - // The toggle for rolling records - UseRollingRecords config.Parameter `yaml:"useRollingRecords,omitempty"` - - // The rolling record checkpoint interval - RecordCheckpointInterval config.Parameter `yaml:"recordCheckpointInterval,omitempty"` - - // The checkpoint retention limit - CheckpointRetentionLimit config.Parameter `yaml:"checkpointRetentionLimit,omitempty"` - - // The path of the records folder where snapshots of rolling record info is stored during a rewards interval - RecordsPath config.Parameter `yaml:"recordsPath,omitempty"` - // The toggle for enabling pDAO proposal verification duties VerifyProposals config.Parameter `yaml:"verifyProposals,omitempty"` @@ -417,50 +418,6 @@ func NewSmartnodeConfig(cfg *RocketPoolConfig) *SmartnodeConfig { OverwriteOnUpgrade: true, }, - UseRollingRecords: config.Parameter{ - ID: "useRollingRecords", - Name: "Use Rolling Records", - Description: "[orange]**WARNING: EXPERIMENTAL**\n\n[white]Enable this to use the new rolling records feature, which stores attestation records for the entire Rocket Pool network in real time instead of collecting them all after a rewards period during tree generation.\n\nOnly useful for the Oracle DAO, or if you generate your own rewards trees.", - Type: config.ParameterType_Bool, - Default: map[config.Network]interface{}{config.Network_All: false}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - - RecordCheckpointInterval: config.Parameter{ - ID: "recordCheckpointInterval", - Name: "Record Checkpoint Interval", - Description: "The number of epochs that should pass before saving a new rolling record checkpoint. Used if Rolling Records is enabled.\n\nOnly useful for the Oracle DAO, or if you generate your own rewards trees.", - Type: config.ParameterType_Uint, - Default: map[config.Network]interface{}{config.Network_All: uint64(45)}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - - CheckpointRetentionLimit: config.Parameter{ - ID: "checkpointRetentionLimit", - Name: "Checkpoint Retention Limit", - Description: "The number of checkpoint files to save on-disk before pruning old ones. Used if Rolling Records is enabled.\n\nOnly useful for the Oracle DAO, or if you generate your own rewards trees.", - Type: config.ParameterType_Uint, - Default: map[config.Network]interface{}{config.Network_All: uint64(200)}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - - RecordsPath: config.Parameter{ - ID: "recordsPath", - Name: "Records Path", - Description: "The path of the folder to store rolling record checkpoints in during a rewards interval. 
Used if Rolling Records is enabled.\n\nOnly useful if you're an Oracle DAO member, or if you generate your own rewards trees.", - Type: config.ParameterType_String, - Default: map[config.Network]interface{}{config.Network_All: getDefaultRecordsDir(cfg)}, - AffectsContainers: []config.ContainerID{config.ContainerID_Watchtower}, - CanBeBlank: false, - OverwriteOnUpgrade: false, - }, - txWatchUrl: map[config.Network]string{ config.Network_Mainnet: "https://etherscan.io/tx", config.Network_Devnet: "https://holesky.etherscan.io/tx", @@ -693,10 +650,6 @@ func (cfg *SmartnodeConfig) GetParameters() []*config.Parameter { &cfg.ArchiveECUrl, &cfg.WatchtowerMaxFeeOverride, &cfg.WatchtowerPrioFeeOverride, - &cfg.UseRollingRecords, - &cfg.RecordCheckpointInterval, - &cfg.CheckpointRetentionLimit, - &cfg.RecordsPath, } } @@ -844,27 +797,45 @@ func (cfg *SmartnodeConfig) GetRethAddress() common.Address { } func getDefaultDataDir(config *RocketPoolConfig) string { + if config == nil { + // Handle tests. Eventually we'll refactor so this isn't necessary. + return "" + } return filepath.Join(config.RocketPoolDirectory, "data") } -func getDefaultRecordsDir(config *RocketPoolConfig) string { - return filepath.Join(getDefaultDataDir(config), "records") -} - -func (cfg *SmartnodeConfig) GetRewardsTreePath(interval uint64, daemon bool) string { +func (cfg *SmartnodeConfig) GetRewardsTreeDirectory(daemon bool) string { if daemon && !cfg.parent.IsNativeMode { - return filepath.Join(DaemonDataPath, RewardsTreesFolder, fmt.Sprintf(RewardsTreeFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) + return filepath.Join(DaemonDataPath, RewardsTreesFolder) } - return filepath.Join(cfg.DataPath.Value.(string), RewardsTreesFolder, fmt.Sprintf(RewardsTreeFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) + return filepath.Join(cfg.DataPath.Value.(string), RewardsTreesFolder) } -func (cfg *SmartnodeConfig) GetMinipoolPerformancePath(interval uint64, daemon bool) string { - if daemon && !cfg.parent.IsNativeMode { - return filepath.Join(DaemonDataPath, RewardsTreesFolder, fmt.Sprintf(MinipoolPerformanceFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) - } +func (cfg *SmartnodeConfig) formatRewardsFilename(f string, interval uint64, extension RewardsExtension) string { + return fmt.Sprintf(f, string(cfg.Network.Value.(config.Network)), interval, string(extension)) +} + +func (cfg *SmartnodeConfig) GetRewardsTreeFilename(interval uint64, extension RewardsExtension) string { + return cfg.formatRewardsFilename(rewardsTreeFilenameFormat, interval, extension) +} + +func (cfg *SmartnodeConfig) GetMinipoolPerformanceFilename(interval uint64) string { + return cfg.formatRewardsFilename(minipoolPerformanceFilenameFormat, interval, RewardsExtensionJSON) +} - return filepath.Join(cfg.DataPath.Value.(string), RewardsTreesFolder, fmt.Sprintf(MinipoolPerformanceFilenameFormat, string(cfg.Network.Value.(config.Network)), interval)) +func (cfg *SmartnodeConfig) GetRewardsTreePath(interval uint64, daemon bool, extension RewardsExtension) string { + return filepath.Join( + cfg.GetRewardsTreeDirectory(daemon), + cfg.GetRewardsTreeFilename(interval, extension), + ) +} + +func (cfg *SmartnodeConfig) GetMinipoolPerformancePath(interval uint64, daemon bool) string { + return filepath.Join( + cfg.GetRewardsTreeDirectory(daemon), + cfg.GetMinipoolPerformanceFilename(interval), + ) } func (cfg *SmartnodeConfig) GetRegenerateRewardsTreeRequestPath(interval uint64, daemon bool) string { @@ -987,6 
+958,14 @@ func (cfg *SmartnodeConfig) GetBalanceBatcherAddress() string { return cfg.balancebatcherAddress[cfg.Network.Value.(config.Network)] } +// Utility function to get the state manager contracts +func (cfg *SmartnodeConfig) GetStateManagerContracts() StateManagerContracts { + return StateManagerContracts{ + Multicaller: common.HexToAddress(cfg.GetMulticallAddress()), + BalanceBatcher: common.HexToAddress(cfg.GetBalanceBatcherAddress()), + } +} + func (cfg *SmartnodeConfig) GetFlashbotsProtectUrl() string { return cfg.flashbotsProtectUrl[cfg.Network.Value.(config.Network)] } diff --git a/shared/services/proposals/proposal-manager.go b/shared/services/proposals/proposal-manager.go index 13ab78429..8fe4f4d57 100644 --- a/shared/services/proposals/proposal-manager.go +++ b/shared/services/proposals/proposal-manager.go @@ -43,10 +43,7 @@ func NewProposalManager(log *log.ColorLogger, cfg *config.RocketPoolConfig, rp * return nil, fmt.Errorf("error creating node tree manager: %w", err) } - stateMgr, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bc, log) - if err != nil { - return nil, fmt.Errorf("error creating network state manager: %w", err) - } + stateMgr := state.NewNetworkStateManager(rp, cfg.Smartnode.GetStateManagerContracts(), bc, log) logPrefix := "[PDAO Proposals]" return &ProposalManager{ diff --git a/shared/services/rewards/execution-client.go b/shared/services/rewards/execution-client.go new file mode 100644 index 000000000..418f825e0 --- /dev/null +++ b/shared/services/rewards/execution-client.go @@ -0,0 +1,76 @@ +package rewards + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/rocket-pool/rocketpool-go/settings/trustednode" +) + +// Interface assertion +var _ RewardsExecutionClient = &defaultRewardsExecutionClient{} + +// An implementation of RewardsExecutionClient that uses +// rocketpool-go to access chain data. +// +// Importantly, this struct wraps a *rocketpool.RocketPool and passes it +// to the old-fashioned rocketpool-go getters that take it as an argument, +// while also fulfilling the requirements of the interface used for dependency injection +// in tests.
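// A minimal usage sketch (hypothetical wiring; rp is assumed to be an
// existing *rocketpool.RocketPool handle):
//
//	client := NewRewardsExecutionClient(rp)
//	index, err := client.GetRewardIndex(nil)
//	if err != nil {
//		return fmt.Errorf("error getting reward index: %w", err)
//	}
//	fmt.Printf("current rewards interval: %d\n", index.Uint64())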
+type defaultRewardsExecutionClient struct { + *rocketpool.RocketPool +} + +func NewRewardsExecutionClient(rp *rocketpool.RocketPool) *defaultRewardsExecutionClient { + out := new(defaultRewardsExecutionClient) + out.RocketPool = rp + return out +} + +func (client *defaultRewardsExecutionClient) GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) { + return trustednode.GetNetworkEnabled(client.RocketPool, networkId, opts) +} + +func (client *defaultRewardsExecutionClient) HeaderByNumber(ctx context.Context, block *big.Int) (*ethtypes.Header, error) { + return client.RocketPool.Client.HeaderByNumber(ctx, block) +} + +func (client *defaultRewardsExecutionClient) GetRewardsEvent(index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, rewards.RewardsEvent, error) { + return rewards.GetRewardsEvent(client.RocketPool, index, rocketRewardsPoolAddresses, opts) +} + +func (client *defaultRewardsExecutionClient) GetRewardSnapshotEvent(previousRewardsPoolAddresses []common.Address, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) { + + found, event, err := client.GetRewardsEvent(interval, previousRewardsPoolAddresses, opts) + if err != nil { + return rewards.RewardsEvent{}, fmt.Errorf("error getting rewards event for interval %d: %w", interval, err) + } + if !found { + return rewards.RewardsEvent{}, fmt.Errorf("interval %d event not found", interval) + } + + return event, nil + +} + +func (client *defaultRewardsExecutionClient) GetRewardIndex(opts *bind.CallOpts) (*big.Int, error) { + return client.RocketPool.GetRewardIndex(opts) +} + +func (client *defaultRewardsExecutionClient) GetContract(contractName string, opts *bind.CallOpts) (*rocketpool.Contract, error) { + return client.RocketPool.GetContract(contractName, opts) +} + +func (client *defaultRewardsExecutionClient) BalanceAt(ctx context.Context, address common.Address, blockNumber *big.Int) (*big.Int, error) { + return client.RocketPool.Client.BalanceAt(ctx, address, blockNumber) +} + +func (client *defaultRewardsExecutionClient) Client() *rocketpool.RocketPool { + return client.RocketPool +} diff --git a/shared/services/rewards/fees/fees.go b/shared/services/rewards/fees/fees.go new file mode 100644 index 000000000..9cda3e9d4 --- /dev/null +++ b/shared/services/rewards/fees/fees.go @@ -0,0 +1,29 @@ +package fees + +import ( + "math/big" +) + +var oneEth = big.NewInt(1000000000000000000) +var tenEth = big.NewInt(0).Mul(oneEth, big.NewInt(10)) +var pointOhFourEth = big.NewInt(40000000000000000) +var pointOneEth = big.NewInt(0).Div(oneEth, big.NewInt(10)) +var sixteenEth = big.NewInt(0).Mul(oneEth, big.NewInt(16)) + +func GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth *big.Int) *big.Int { + if bond.Cmp(sixteenEth) >= 0 { + return fee + } + // fee = max(fee, 0.10 Eth + (0.04 Eth * min(10 Eth, percentOfBorrowedETH) / 10 Eth)) + _min := big.NewInt(0).Set(tenEth) + if _min.Cmp(percentOfBorrowedEth) > 0 { + _min.Set(percentOfBorrowedEth) + } + dividend := _min.Mul(_min, pointOhFourEth) + divResult := dividend.Div(dividend, tenEth) + feeWithBonus := divResult.Add(divResult, pointOneEth) + if fee.Cmp(feeWithBonus) >= 0 { + return fee + } + return feeWithBonus +} diff --git a/shared/services/rewards/files.go b/shared/services/rewards/files.go index 93893662d..10f9905bc 100644 --- a/shared/services/rewards/files.go +++ b/shared/services/rewards/files.go @@ -43,25 +43,35 @@ func ReadLocalMinipoolPerformanceFile(path string) (*LocalMinipoolPerformanceFil } // Interface for 
local rewards or minipool performance files -type ILocalFile interface { +type ISerializable interface { // Converts the underlying interface to a byte slice Serialize() ([]byte, error) + SerializeSSZ() ([]byte, error) } -// A wrapper around ILocalFile representing a local rewards file or minipool performance file. +// A wrapper around ISerializable representing a local rewards file or minipool performance file. // Can be used with anything that can be serialized to bytes or parsed from bytes. -type LocalFile[T ILocalFile] struct { +type LocalFile[T ISerializable] struct { f T fullPath string } +type ILocalFile interface { + ISerializable + Write() ([]byte, error) + WriteSSZ() ([]byte, error) + Path() string + FileName() string + CreateCompressedFileAndCid() (string, cid.Cid, error) +} + // Type aliases type LocalRewardsFile = LocalFile[IRewardsFile] type LocalMinipoolPerformanceFile = LocalFile[IMinipoolPerformanceFile] // NewLocalFile creates the wrapper, but doesn't write to disk. // This should be used when generating new trees / performance files. -func NewLocalFile[T ILocalFile](ilf T, fullpath string) *LocalFile[T] { +func NewLocalFile[T ISerializable](ilf T, fullpath string) *LocalFile[T] { return &LocalFile[T]{ f: ilf, fullPath: fullpath, @@ -78,18 +88,45 @@ func (lf *LocalFile[T]) Serialize() ([]byte, error) { return lf.f.Serialize() } +// Converts the underlying interface to a byte slice by calling its SerializeSSZ function +func (lf *LocalFile[T]) SerializeSSZ() ([]byte, error) { + return lf.f.SerializeSSZ() +} + // Serializes the file and writes it to disk -func (lf *LocalFile[T]) Write() error { +func (lf *LocalFile[T]) Write() ([]byte, error) { data, err := lf.Serialize() if err != nil { - return fmt.Errorf("error serializing file: %w", err) + return nil, fmt.Errorf("error serializing file: %w", err) + } + + err = os.WriteFile(lf.fullPath, data, 0644) + if err != nil { + return nil, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) + } + return data, nil +} + +// Serializes the file as SSZ and writes it to disk +func (lf *LocalFile[T]) WriteSSZ() ([]byte, error) { + data, err := lf.SerializeSSZ() + if err != nil { + return nil, fmt.Errorf("error serializing file: %w", err) } err = os.WriteFile(lf.fullPath, data, 0644) if err != nil { - return fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) + return nil, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) } - return nil + return data, nil +} + +func (lf *LocalFile[T]) Path() string { + return lf.fullPath +} + +func (lf *LocalFile[T]) FileName() string { + return filepath.Base(lf.Path()) } // Computes the CID that would be used if we compressed the file with zst, @@ -98,11 +135,11 @@ func (lf *LocalFile[T]) Write() error { // // N.B.
This function will also save the compressed file to disk so it can later be uploaded to ipfs -func (lf *LocalFile[T]) CreateCompressedFileAndCid() (cid.Cid, error) { +func (lf *LocalFile[T]) CreateCompressedFileAndCid() (string, cid.Cid, error) { // Serialize data, err := lf.Serialize() if err != nil { - return cid.Cid{}, fmt.Errorf("error serializing file: %w", err) + return "", cid.Cid{}, fmt.Errorf("error serializing file: %w", err) } // Compress @@ -112,14 +149,132 @@ func (lf *LocalFile[T]) Write() error { filename := lf.fullPath + config.RewardsTreeIpfsExtension c, err := singleFileDirIPFSCid(compressedBytes, filepath.Base(filename)) if err != nil { - return cid.Cid{}, fmt.Errorf("error calculating CID: %w", err) + return filename, cid.Cid{}, fmt.Errorf("error calculating CID: %w", err) } // Write to disk // Take care to write to `filename` since it has the .zst extension added err = os.WriteFile(filename, compressedBytes, 0644) if err != nil { - return cid.Cid{}, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) + return filename, cid.Cid{}, fmt.Errorf("error writing file to %s: %w", lf.fullPath, err) + } + return filename, c, nil +} + +// Saves all rewards artifacts, including ssz if the rewards file is at least v3. +// If nodeTrusted is true, zstd-compressed copies will also be saved, with the cid of the +// compressed minipool perf file added to the rewards file before the latter is compressed. +// +// If the rewards file is at least v3, the cid of the uncompressed ssz file is returned for consensus. +// Otherwise, the cid of the compressed json rewards file is returned for consensus. +// Thus, this function is only suitable for v9+; earlier versions should use saveJSONArtifacts instead. +func saveRewardsArtifacts(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + if treeResult.RewardsFile.GetRewardsFileVersion() < rewardsFileVersionThree { + return saveJSONArtifacts(smartnode, treeResult, nodeTrusted) + } + + return saveArtifactsImpl(smartnode, treeResult, nodeTrusted, true) +} + +// Saves JSON artifacts from tree generation +// If nodeTrusted is true, zstd-compressed copies will also be saved, with the cid of the +// compressed minipool perf file added to the rewards file before the latter is compressed. +// +// Returns the cid of the compressed rewards file and a map containing all the other cids, or an error. +func saveJSONArtifacts(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return saveArtifactsImpl(smartnode, treeResult, nodeTrusted, false) +} + +// Saves JSON artifacts and optionally compressed + ssz artifacts +// If includeSSZ is true, the primary cid is the uncompressed reward ssz. +// Otherwise, it is the compressed rewards json. +func saveArtifactsImpl(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool, includeSSZ bool) (cid.Cid, map[string]cid.Cid, error) { + rewardsFile := treeResult.RewardsFile + currentIndex := rewardsFile.GetIndex() + + var primaryCid *cid.Cid + out := make(map[string]cid.Cid, 4) + + files := []ILocalFile{ + // Do not reorder!
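// The save loop below branches on the slice index: i == 0 is the minipool
// performance file (its compressed CID gets written back into the rewards file),
// i == 1 is the JSON rewards file (whose compressed CID is the consensus CID
// when SSZ is excluded), and i == 2 is the optional SSZ rewards file (whose
// uncompressed CID is the consensus CID when SSZ is included).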
+ // i == 0 - minipool performance file + NewLocalFile[IMinipoolPerformanceFile]( + treeResult.MinipoolPerformanceFile, + smartnode.GetMinipoolPerformancePath(currentIndex, true), + ), + // i == 1 - rewards file + NewLocalFile[IRewardsFile]( + rewardsFile, + smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionJSON), + ), + } + + // Only include ssz for supported versions + includeSSZ = includeSSZ && rewardsFile.GetRewardsFileVersion() >= minRewardsFileVersionSSZ + + if includeSSZ { + files = append( + files, + // i == 2 - ssz rewards file + NewLocalFile[IRewardsFile]( + rewardsFile, + smartnode.GetRewardsTreePath(currentIndex, true, config.RewardsExtensionSSZ), + ), + ) + } + + for i, f := range files { + var data []byte + var err error + + if includeSSZ && i == 2 { + data, err = f.WriteSSZ() + } else { + data, err = f.Write() + } + if err != nil { + return cid.Cid{}, nil, fmt.Errorf("error saving %s: %w", f.Path(), err) + } + + uncompressedCid, err := singleFileDirIPFSCid(data, f.FileName()) + if err != nil { + return cid.Cid{}, nil, fmt.Errorf("error calculating cid for saved file %s: %w", f.Path(), err) + } + out[f.FileName()] = uncompressedCid + + if !nodeTrusted { + // For some reason we didn't simply omit this in the past, so for consistency, keep setting it. + rewardsFile.SetMinipoolPerformanceFileCID("---") + // Non-oDAO nodes only need the uncompressed files + continue + } + + // Save compressed versions + compressedFilePath, compressedCid, err := f.CreateCompressedFileAndCid() + if err != nil { + return cid.Cid{}, nil, fmt.Errorf("error compressing file %s: %w", f.Path(), err) + } + out[filepath.Base(compressedFilePath)] = compressedCid + + // Note the performance cid in the rewards file + if i == 0 { + rewardsFile.SetMinipoolPerformanceFileCID(compressedCid.String()) + } + + // Note the primary cid for artifacts used for consensus + if !includeSSZ { + // JSON rewards file + if i == 1 { + primaryCid = &compressedCid + } + } else { + // SSZ rewards file + if i == 2 { + // Consensus is on the uncompressed cid when using ssz + primaryCid = &uncompressedCid + } + } + }
miniPerfFileBytes, err := localMinipoolPerformanceFile.Write() if err != nil { t.Fatal(err) } + if miniPerfFileBytes == nil { + t.Fatal("Write() should have returned serialized data") + } + directBytes, _ = minipoolPerformanceFile.Serialize() + if !bytes.Equal(directBytes, miniPerfFileBytes) { + t.Fatal("Write() returned something different than Serialize()") + } // Check that the file can be parsed localRewardsFile, err = ReadLocalRewardsFile(path.Join(dir, "rewards.json")) @@ -50,10 +66,10 @@ func TestFilesFromTree(t *testing.T) { t.Fatal(err) } - if localRewardsFile.Impl().GetHeader().RulesetVersion != f.RewardsFileHeader.RulesetVersion { + if localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion != f.RewardsFileHeader.RulesetVersion { t.Fatalf( "expected parsed version %d to match serialized version %d\n", - localRewardsFile.Impl().GetHeader().RulesetVersion, + localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion, f.RewardsFileHeader.RulesetVersion, ) } @@ -74,7 +90,7 @@ func TestCompressionAndCids(t *testing.T) { RewardsFileVersion: 3, RulesetVersion: 8, }, - MinipoolPerformanceFile: MinipoolPerformanceFile_v3{ + MinipoolPerformanceFile: MinipoolPerformanceFile_v2{ RewardsFileVersion: 3, RulesetVersion: 9, }, @@ -85,21 +101,27 @@ func TestCompressionAndCids(t *testing.T) { path.Join(dir, "rewards.json"), ) - minipoolPerformanceFile := localRewardsFile.Impl().GetMinipoolPerformanceFile() + minipoolPerformanceFile := &f.MinipoolPerformanceFile localMinipoolPerformanceFile := NewLocalFile[IMinipoolPerformanceFile]( minipoolPerformanceFile, path.Join(dir, "performance.json"), ) - rewardsCid, err := localRewardsFile.CreateCompressedFileAndCid() + returnedFilename, rewardsCid, err := localRewardsFile.CreateCompressedFileAndCid() if err != nil { t.Fatal(err) } + if filepath.Base(returnedFilename) != "rewards.json.zst" { + t.Fatalf("Unexpected filename: %s", returnedFilename) + } - performanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() + returnedFilename, performanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() if err != nil { t.Fatal(err) } + if filepath.Base(returnedFilename) != "performance.json.zst" { + t.Fatalf("Unexpected filename: %s", returnedFilename) + } // Check that compressed files were written to disk and their cids match what was returned by CompressedCid compressedRewardsBytes, err := os.ReadFile(path.Join(dir, "rewards.json.zst")) @@ -151,21 +173,21 @@ func TestCompressionAndCids(t *testing.T) { } // Make sure values were preserved in the round trip - if localRewardsFile.Impl().GetHeader().RulesetVersion != parsedRewards.GetHeader().RulesetVersion { + if localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion != parsedRewards.(*RewardsFile_v3).RulesetVersion { t.Fatalf( "expected parsed version %d to match serialized version %d\n", - localRewardsFile.Impl().GetHeader().RulesetVersion, - parsedRewards.GetHeader().RulesetVersion, + localRewardsFile.Impl().(*RewardsFile_v3).RulesetVersion, + parsedRewards.(*RewardsFile_v3).RulesetVersion, ) } - if localRewardsFile.Impl().GetMinipoolPerformanceFile().(*MinipoolPerformanceFile_v3).RulesetVersion != - parsedPerformance.(*MinipoolPerformanceFile_v3).RulesetVersion { + if minipoolPerformanceFile.RulesetVersion != + parsedPerformance.(*MinipoolPerformanceFile_v2).RulesetVersion { t.Fatalf( "expected parsed version %d to match serialized version %d\n", - localRewardsFile.Impl().GetMinipoolPerformanceFile().(*MinipoolPerformanceFile_v3).RulesetVersion, - 
parsedPerformance.(*MinipoolPerformanceFile_v3).RulesetVersion, + minipoolPerformanceFile.RulesetVersion, + parsedPerformance.(*MinipoolPerformanceFile_v2).RulesetVersion, ) } } diff --git a/shared/services/rewards/generator-impl-v8-rolling.go b/shared/services/rewards/generator-impl-v8-rolling.go deleted file mode 100644 index a9d88fbce..000000000 --- a/shared/services/rewards/generator-impl-v8-rolling.go +++ /dev/null @@ -1,807 +0,0 @@ -package rewards - -import ( - "context" - "encoding/hex" - "fmt" - "math/big" - "sort" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/rocket-pool/rocketpool-go/rocketpool" - tnsettings "github.com/rocket-pool/rocketpool-go/settings/trustednode" - "github.com/rocket-pool/rocketpool-go/utils/eth" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/config" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/utils/log" - "github.com/wealdtech/go-merkletree" - "github.com/wealdtech/go-merkletree/keccak256" -) - -// Implementation for tree generator ruleset v8 with rolling record support -type treeGeneratorImpl_v8_rolling struct { - networkState *state.NetworkState - rewardsFile *RewardsFile_v3 - elSnapshotHeader *types.Header - log *log.ColorLogger - logPrefix string - rp *rocketpool.RocketPool - cfg *config.RocketPoolConfig - bc beacon.Client - opts *bind.CallOpts - smoothingPoolBalance *big.Int - intervalDutiesInfo *IntervalDutiesInfo - slotsPerEpoch uint64 - validatorIndexMap map[string]*MinipoolInfo - elStartTime time.Time - elEndTime time.Time - validNetworkCache map[uint64]bool - epsilon *big.Int - intervalSeconds *big.Int - beaconConfig beacon.Eth2Config - rollingRecord *RollingRecord - nodeDetails map[common.Address]*NodeSmoothingDetails -} - -// Create a new tree generator -func newTreeGeneratorImpl_v8_rolling(log *log.ColorLogger, logPrefix string, index uint64, startTime time.Time, endTime time.Time, consensusBlock uint64, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState, rollingRecord *RollingRecord) *treeGeneratorImpl_v8_rolling { - return &treeGeneratorImpl_v8_rolling{ - rewardsFile: &RewardsFile_v3{ - RewardsFileHeader: &RewardsFileHeader{ - RewardsFileVersion: 3, - RulesetVersion: 8, - Index: index, - StartTime: startTime.UTC(), - EndTime: endTime.UTC(), - ConsensusEndBlock: consensusBlock, - ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - IntervalsPassed: intervalsPassed, - InvalidNetworkNodes: map[common.Address]uint64{}, - TotalRewards: &TotalRewards{ - ProtocolDaoRpl: NewQuotedBigInt(0), - TotalCollateralRpl: NewQuotedBigInt(0), - TotalOracleDaoRpl: NewQuotedBigInt(0), - TotalSmoothingPoolEth: NewQuotedBigInt(0), - PoolStakerSmoothingPoolEth: NewQuotedBigInt(0), - NodeOperatorSmoothingPoolEth: NewQuotedBigInt(0), - }, - NetworkRewards: map[uint64]*NetworkRewardsInfo{}, - }, - NodeRewards: map[common.Address]*NodeRewardsInfo_v3{}, - MinipoolPerformanceFile: MinipoolPerformanceFile_v3{ - Index: index, - StartTime: startTime.UTC(), - EndTime: endTime.UTC(), - ConsensusEndBlock: consensusBlock, - ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v3{}, - }, - }, - validatorIndexMap: map[string]*MinipoolInfo{}, - elSnapshotHeader: elSnapshotHeader, - log: log, - logPrefix: logPrefix, - networkState: 
state, - rollingRecord: rollingRecord, - } -} - -// Get the version of the ruleset used by this generator -func (r *treeGeneratorImpl_v8_rolling) getRulesetVersion() uint64 { - return r.rewardsFile.RulesetVersion -} - -func (r *treeGeneratorImpl_v8_rolling) generateTree(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (IRewardsFile, error) { - - r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) - - // Provision some struct params - r.rp = rp - r.cfg = cfg - r.bc = bc - r.validNetworkCache = map[uint64]bool{ - 0: true, - } - - // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) - r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network - r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion - r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion - - // Get the Beacon config - r.beaconConfig = r.networkState.BeaconConfig - r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch - - // Set the EL client call opts - r.opts = &bind.CallOpts{ - BlockNumber: r.elSnapshotHeader.Number, - } - - r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) - - // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation - nodeCount := len(r.networkState.NodeDetails) - minipoolCount := len(r.networkState.MinipoolDetails) - if nodeCount > minipoolCount { - r.epsilon = big.NewInt(int64(nodeCount)) - } else { - r.epsilon = big.NewInt(int64(minipoolCount)) - } - - // Calculate the RPL rewards - err := r.calculateRplRewards() - if err != nil { - return nil, fmt.Errorf("error calculating RPL rewards: %w", err) - } - - // Calculate the ETH rewards - err = r.calculateEthRewards(true) - if err != nil { - return nil, fmt.Errorf("error calculating ETH rewards: %w", err) - } - - // Calculate the network reward map and the totals - r.updateNetworksAndTotals() - - // Generate the Merkle Tree - err = r.generateMerkleTree() - if err != nil { - return nil, fmt.Errorf("error generating Merkle tree: %w", err) - } - - // Sort all of the missed attestations so the files are always generated in the same state - for _, minipoolInfo := range r.rewardsFile.MinipoolPerformanceFile.MinipoolPerformance { - sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool { - return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j] - }) - } - - return r.rewardsFile, nil - -} - -// Quickly calculates an approximate of the staker's share of the smoothing pool balance without processing Beacon performance -// Used for approximate returns in the rETH ratio update -func (r *treeGeneratorImpl_v8_rolling) approximateStakerShareOfSmoothingPool(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (*big.Int, error) { - r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) - - r.rp = rp - r.cfg = cfg - r.bc = bc - r.validNetworkCache = map[uint64]bool{ - 0: true, - } - - // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) - r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network - r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion - r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion - - // Get the Beacon config - r.beaconConfig = 
r.networkState.BeaconConfig - r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch - - // Set the EL client call opts - r.opts = &bind.CallOpts{ - BlockNumber: r.elSnapshotHeader.Number, - } - - r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) - - // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation - nodeCount := len(r.networkState.NodeDetails) - minipoolCount := len(r.networkState.MinipoolDetails) - if nodeCount > minipoolCount { - r.epsilon = big.NewInt(int64(nodeCount)) - } else { - r.epsilon = big.NewInt(int64(minipoolCount)) - } - - // Calculate the ETH rewards - err := r.calculateEthRewards(false) - if err != nil { - return nil, fmt.Errorf("error calculating ETH rewards: %w", err) - } - - return &r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int, nil -} - -// Generates a merkle tree from the provided rewards map -func (r *treeGeneratorImpl_v8_rolling) generateMerkleTree() error { - - // Generate the leaf data for each node - totalData := make([][]byte, 0, len(r.rewardsFile.NodeRewards)) - for address, rewardsForNode := range r.rewardsFile.NodeRewards { - // Ignore nodes that didn't receive any rewards - if rewardsForNode.CollateralRpl.Cmp(common.Big0) == 0 && rewardsForNode.OracleDaoRpl.Cmp(common.Big0) == 0 && rewardsForNode.SmoothingPoolEth.Cmp(common.Big0) == 0 { - continue - } - - // Node data is address[20] :: network[32] :: RPL[32] :: ETH[32] - nodeData := make([]byte, 0, 20+32*3) - - // Node address - addressBytes := address.Bytes() - nodeData = append(nodeData, addressBytes...) - - // Node network - network := big.NewInt(0).SetUint64(rewardsForNode.RewardNetwork) - networkBytes := make([]byte, 32) - network.FillBytes(networkBytes) - nodeData = append(nodeData, networkBytes...) - - // RPL rewards - rplRewards := big.NewInt(0) - rplRewards.Add(&rewardsForNode.CollateralRpl.Int, &rewardsForNode.OracleDaoRpl.Int) - rplRewardsBytes := make([]byte, 32) - rplRewards.FillBytes(rplRewardsBytes) - nodeData = append(nodeData, rplRewardsBytes...) - - // ETH rewards - ethRewardsBytes := make([]byte, 32) - rewardsForNode.SmoothingPoolEth.FillBytes(ethRewardsBytes) - nodeData = append(nodeData, ethRewardsBytes...) 
- - // Assign it to the node rewards tracker and add it to the leaf data slice - rewardsForNode.MerkleData = nodeData - totalData = append(totalData, nodeData) - } - - // Generate the tree - tree, err := merkletree.NewUsing(totalData, keccak256.New(), false, true) - if err != nil { - return fmt.Errorf("error generating Merkle Tree: %w", err) - } - - // Generate the proofs for each node - for address, rewardsForNode := range r.rewardsFile.NodeRewards { - // Get the proof - proof, err := tree.GenerateProof(rewardsForNode.MerkleData, 0) - if err != nil { - return fmt.Errorf("error generating proof for node %s: %w", address.Hex(), err) - } - - // Convert the proof into hex strings - proofStrings := make([]string, len(proof.Hashes)) - for i, hash := range proof.Hashes { - proofStrings[i] = fmt.Sprintf("0x%s", hex.EncodeToString(hash)) - } - - // Assign the hex strings to the node rewards struct - rewardsForNode.MerkleProof = proofStrings - } - - r.rewardsFile.MerkleTree = tree - r.rewardsFile.MerkleRoot = common.BytesToHash(tree.Root()).Hex() - return nil - -} - -// Calculates the per-network distribution amounts and the total reward amounts -func (r *treeGeneratorImpl_v8_rolling) updateNetworksAndTotals() { - - // Get the highest network index with valid rewards - highestNetworkIndex := uint64(0) - for network := range r.rewardsFile.NetworkRewards { - if network > highestNetworkIndex { - highestNetworkIndex = network - } - } - - // Create the map for each network, including unused ones - for network := uint64(0); network <= highestNetworkIndex; network++ { - _, exists := r.rewardsFile.NetworkRewards[network] - if !exists { - rewardsForNetwork := &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[network] = rewardsForNetwork - } - } - -} - -func (r *treeGeneratorImpl_v8_rolling) calculateNodeRplRewards( - collateralRewards *big.Int, - nodeEffectiveStake *big.Int, - totalEffectiveRplStake *big.Int, - nodeWeight *big.Int, - totalNodeWeight *big.Int, -) *big.Int { - - if nodeEffectiveStake.Sign() <= 0 || nodeWeight.Sign() <= 0 { - return big.NewInt(0) - } - - // C is in the closed range [1, 6] - // C := min(6, interval - 18 + 1) - c := int64(6) - interval := int64(r.networkState.NetworkDetails.RewardIndex) - - if c > (interval - 18 + 1) { - c = interval - 18 + 1 - } - - if c <= 0 { - c = 1 - } - - bigC := big.NewInt(c) - - // (collateralRewards * C * nodeWeight / (totalNodeWeight * 6)) + (collateralRewards * (6 - C) * nodeEffectiveStake / (totalEffectiveRplStake * 6)) - // First, (collateralRewards * C * nodeWeight / (totalNodeWeight * 6)) - rpip30Rewards := big.NewInt(0).Mul(collateralRewards, nodeWeight) - rpip30Rewards.Mul(rpip30Rewards, bigC) - rpip30Rewards.Quo(rpip30Rewards, big.NewInt(0).Mul(totalNodeWeight, six)) - - // Once C hits 6 we can exit early as an optimization - if c == 6 { - return rpip30Rewards - } - - // Second, (collateralRewards * (6 - C) * nodeEffectiveStake / (totalEffectiveRplStake * 6)) - oldRewards := big.NewInt(6) - oldRewards.Sub(oldRewards, bigC) - oldRewards.Mul(oldRewards, collateralRewards) - oldRewards.Mul(oldRewards, nodeEffectiveStake) - oldRewards.Quo(oldRewards, big.NewInt(0).Mul(totalEffectiveRplStake, six)) - - // Add them together - return rpip30Rewards.Add(rpip30Rewards, oldRewards) -} - -// Calculates the RPL rewards for the given interval -func (r *treeGeneratorImpl_v8_rolling) calculateRplRewards() error { - pendingRewards := 
r.networkState.NetworkDetails.PendingRPLRewards - r.log.Printlnf("%s Pending RPL rewards: %s (%.3f)", r.logPrefix, pendingRewards.String(), eth.WeiToEth(pendingRewards)) - if pendingRewards.Cmp(common.Big0) == 0 { - return fmt.Errorf("there are no pending RPL rewards, so this interval cannot be used for rewards submission") - } - - // Get baseline Protocol DAO rewards - pDaoPercent := r.networkState.NetworkDetails.ProtocolDaoRewardsPercent - pDaoRewards := NewQuotedBigInt(0) - pDaoRewards.Mul(pendingRewards, pDaoPercent) - pDaoRewards.Div(&pDaoRewards.Int, eth.EthToWei(1)) - r.log.Printlnf("%s Expected Protocol DAO rewards: %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(&pDaoRewards.Int)) - - // Get node operator rewards - nodeOpPercent := r.networkState.NetworkDetails.NodeOperatorRewardsPercent - totalNodeRewards := big.NewInt(0) - totalNodeRewards.Mul(pendingRewards, nodeOpPercent) - totalNodeRewards.Div(totalNodeRewards, eth.EthToWei(1)) - r.log.Printlnf("%s Approx. total collateral RPL rewards: %s (%.3f)", r.logPrefix, totalNodeRewards.String(), eth.WeiToEth(totalNodeRewards)) - - // Calculate the effective stake of each node, scaling by their participation in this interval - // Before entering this function, make sure to hard-code MaxCollateralFraction to 1.5 eth (150% in wei), to comply with RPIP-30. - // Do it here, as the network state value will still be used for vote power, so doing it upstream is likely to introduce more issues. - // Doing it here also ensures that v1-7 continue to run correctly on networks other than mainnet where the max collateral fraction may not have always been 150%. - r.networkState.NetworkDetails.MaxCollateralFraction = big.NewInt(1.5e18) // 1.5 eth is 150% in wei - trueNodeEffectiveStakes, totalNodeEffectiveStake, err := r.networkState.CalculateTrueEffectiveStakes(true, true) - if err != nil { - return fmt.Errorf("error calculating effective RPL stakes: %w", err) - } - - // Calculate the RPIP-30 weight of each node, scaling by their participation in this interval - nodeWeights, totalNodeWeight, err := r.networkState.CalculateNodeWeights() - if err != nil { - return fmt.Errorf("error calculating node weights: %w", err) - } - - // Operate normally if any node has rewards - if totalNodeEffectiveStake.Sign() > 0 && totalNodeWeight.Sign() > 0 { - // Make sure to record totalNodeWeight in the rewards file - quotedTotalNodeWeight := NewQuotedBigInt(0) - quotedTotalNodeWeight.Set(totalNodeWeight) - r.rewardsFile.TotalRewards.TotalNodeWeight = quotedTotalNodeWeight - - r.log.Printlnf("%s Calculating individual collateral rewards...", r.logPrefix) - for i, nodeDetails := range r.networkState.NodeDetails { - // Get how much RPL goes to this node - nodeRplRewards := r.calculateNodeRplRewards( - totalNodeRewards, - trueNodeEffectiveStakes[nodeDetails.NodeAddress], - totalNodeEffectiveStake, - nodeWeights[nodeDetails.NodeAddress], - totalNodeWeight, - ) - - // If there are pending rewards, add it to the map - if nodeRplRewards.Sign() == 1 { - rewardsForNode, exists := r.rewardsFile.NodeRewards[nodeDetails.NodeAddress] - if !exists { - // Get the network the rewards should go to - network := r.networkState.NodeDetails[i].RewardNetwork.Uint64() - validNetwork, err := r.validateNetwork(network) - if err != nil { - return err - } - if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeDetails.NodeAddress] = network - network = 0 - } - - rewardsForNode = &NodeRewardsInfo_v3{ - RewardNetwork: network, - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: 
NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NodeRewards[nodeDetails.NodeAddress] = rewardsForNode - } - rewardsForNode.CollateralRpl.Add(&rewardsForNode.CollateralRpl.Int, nodeRplRewards) - - // Add the rewards to the running total for the specified network - rewardsForNetwork, exists := r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] - if !exists { - rewardsForNetwork = &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] = rewardsForNetwork - } - rewardsForNetwork.CollateralRpl.Add(&rewardsForNetwork.CollateralRpl.Int, nodeRplRewards) - } - } - - // Sanity check to make sure we arrived at the correct total - delta := big.NewInt(0) - totalCalculatedNodeRewards := big.NewInt(0) - for _, networkRewards := range r.rewardsFile.NetworkRewards { - totalCalculatedNodeRewards.Add(totalCalculatedNodeRewards, &networkRewards.CollateralRpl.Int) - } - delta.Sub(totalNodeRewards, totalCalculatedNodeRewards).Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return fmt.Errorf("error calculating collateral RPL: total was %s, but expected %s; error was too large", totalCalculatedNodeRewards.String(), totalNodeRewards.String()) - } - r.rewardsFile.TotalRewards.TotalCollateralRpl.Int = *totalCalculatedNodeRewards - r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedNodeRewards.String(), delta.String()) - pDaoRewards.Sub(pendingRewards, totalCalculatedNodeRewards) - } else { - // In this situation, none of the nodes in the network had eligible rewards so send it all to the pDAO - pDaoRewards.Add(&pDaoRewards.Int, totalNodeRewards) - r.log.Printlnf("%s None of the nodes were eligible for collateral rewards, sending everything to the pDAO; now at %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(&pDaoRewards.Int)) - } - - // Handle Oracle DAO rewards - oDaoPercent := r.networkState.NetworkDetails.TrustedNodeOperatorRewardsPercent - totalODaoRewards := big.NewInt(0) - totalODaoRewards.Mul(pendingRewards, oDaoPercent) - totalODaoRewards.Div(totalODaoRewards, eth.EthToWei(1)) - r.log.Printlnf("%s Total Oracle DAO RPL rewards: %s (%.3f)", r.logPrefix, totalODaoRewards.String(), eth.WeiToEth(totalODaoRewards)) - - oDaoDetails := r.networkState.OracleDaoMemberDetails - - // Calculate the true effective time of each oDAO node based on their participation in this interval - totalODaoNodeTime := big.NewInt(0) - trueODaoNodeTimes := map[common.Address]*big.Int{} - for _, details := range oDaoDetails { - // Get the timestamp of the node joining the oDAO - joinTime := details.JoinedTime - - // Get the actual effective time, scaled based on participation - intervalDuration := r.networkState.NetworkDetails.IntervalDuration - intervalDurationBig := big.NewInt(int64(intervalDuration.Seconds())) - participationTime := big.NewInt(0).Set(intervalDurationBig) - snapshotBlockTime := time.Unix(int64(r.elSnapshotHeader.Time), 0) - eligibleDuration := snapshotBlockTime.Sub(joinTime) - if eligibleDuration < intervalDuration { - participationTime = big.NewInt(int64(eligibleDuration.Seconds())) - } - trueODaoNodeTimes[details.Address] = participationTime - - // Add it to the total - totalODaoNodeTime.Add(totalODaoNodeTime, participationTime) - } - - for _, details := range oDaoDetails { - address := details.Address - - // Calculate the oDAO rewards for the node: (participation time) * (total oDAO 
rewards) / (total participation time) - individualOdaoRewards := big.NewInt(0) - individualOdaoRewards.Mul(trueODaoNodeTimes[address], totalODaoRewards) - individualOdaoRewards.Div(individualOdaoRewards, totalODaoNodeTime) - - rewardsForNode, exists := r.rewardsFile.NodeRewards[address] - if !exists { - // Get the network the rewards should go to - network := r.networkState.NodeDetailsByAddress[address].RewardNetwork.Uint64() - validNetwork, err := r.validateNetwork(network) - if err != nil { - return err - } - if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[address] = network - network = 0 - } - - rewardsForNode = &NodeRewardsInfo_v3{ - RewardNetwork: network, - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NodeRewards[address] = rewardsForNode - - } - rewardsForNode.OracleDaoRpl.Add(&rewardsForNode.OracleDaoRpl.Int, individualOdaoRewards) - - // Add the rewards to the running total for the specified network - rewardsForNetwork, exists := r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] - if !exists { - rewardsForNetwork = &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] = rewardsForNetwork - } - rewardsForNetwork.OracleDaoRpl.Add(&rewardsForNetwork.OracleDaoRpl.Int, individualOdaoRewards) - } - - // Sanity check to make sure we arrived at the correct total - totalCalculatedOdaoRewards := big.NewInt(0) - delta := big.NewInt(0) - for _, networkRewards := range r.rewardsFile.NetworkRewards { - totalCalculatedOdaoRewards.Add(totalCalculatedOdaoRewards, &networkRewards.OracleDaoRpl.Int) - } - delta.Sub(totalODaoRewards, totalCalculatedOdaoRewards).Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return fmt.Errorf("error calculating ODao RPL: total was %s, but expected %s; error was too large", totalCalculatedOdaoRewards.String(), totalODaoRewards.String()) - } - r.rewardsFile.TotalRewards.TotalOracleDaoRpl.Int = *totalCalculatedOdaoRewards - r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedOdaoRewards.String(), delta.String()) - - // Get actual protocol DAO rewards - pDaoRewards.Sub(&pDaoRewards.Int, totalCalculatedOdaoRewards) - r.rewardsFile.TotalRewards.ProtocolDaoRpl = pDaoRewards - r.log.Printlnf("%s Actual Protocol DAO rewards: %s to account for truncation", r.logPrefix, pDaoRewards.String()) - - return nil - -} - -// Calculates the ETH rewards for the given interval -func (r *treeGeneratorImpl_v8_rolling) calculateEthRewards(checkBeaconPerformance bool) error { - - // Get the Smoothing Pool contract's balance - r.smoothingPoolBalance = r.networkState.NetworkDetails.SmoothingPoolBalance - r.log.Printlnf("%s Smoothing Pool Balance: %s (%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) - - // Ignore the ETH calculation if there are no rewards - if r.smoothingPoolBalance.Cmp(common.Big0) == 0 { - return nil - } - - if r.rewardsFile.Index == 0 { - // This is the first interval, Smoothing Pool rewards are ignored on the first interval since it doesn't have a discrete start time - return nil - } - - // Get the EL block for the start of this interval - startElBlockHeader, err := r.getStartBlocksForInterval() - if err != nil { - return err - } - - r.elStartTime = time.Unix(int64(startElBlockHeader.Time), 0) - r.elEndTime = time.Unix(int64(r.elSnapshotHeader.Time), 0) - 
r.intervalSeconds = big.NewInt(int64(r.elEndTime.Sub(r.elStartTime) / time.Second)) - - // Process the attestation performance for each minipool during this interval - r.intervalDutiesInfo = &IntervalDutiesInfo{ - Index: r.rewardsFile.Index, - Slots: map[uint64]*SlotInfo{}, - } - - // Determine how much ETH each node gets and how much the pool stakers get - poolStakerETH, nodeOpEth, err := r.calculateNodeRewards() - if err != nil { - return err - } - - // Update the rewards maps - for nodeAddress, nodeInfo := range r.nodeDetails { - if nodeInfo.SmoothingPoolEth.Cmp(common.Big0) > 0 { - rewardsForNode, exists := r.rewardsFile.NodeRewards[nodeAddress] - if !exists { - network := nodeInfo.RewardsNetwork - validNetwork, err := r.validateNetwork(network) - if err != nil { - return err - } - if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeAddress] = network - network = 0 - } - - rewardsForNode = &NodeRewardsInfo_v3{ - RewardNetwork: network, - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NodeRewards[nodeAddress] = rewardsForNode - } - rewardsForNode.SmoothingPoolEth.Add(&rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) - - // Add minipool rewards to the JSON - for _, minipoolInfo := range nodeInfo.Minipools { - successfulAttestations := uint64(minipoolInfo.AttestationCount) - missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) - performance := &SmoothingPoolMinipoolPerformance_v3{ - Pubkey: minipoolInfo.ValidatorPubkey.Hex(), - SuccessfulAttestations: successfulAttestations, - MissedAttestations: missingAttestations, - AttestationScore: &QuotedBigInt{Int: minipoolInfo.AttestationScore.Int}, - EthEarned: &QuotedBigInt{Int: *minipoolInfo.MinipoolShare}, - MissingAttestationSlots: []uint64{}, - } - if successfulAttestations+missingAttestations == 0 { - // Don't include minipools that have zero attestations - continue - } - for slot := range minipoolInfo.MissingAttestationSlots { - performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) - } - r.rewardsFile.MinipoolPerformanceFile.MinipoolPerformance[minipoolInfo.Address] = performance - } - - // Add the rewards to the running total for the specified network - rewardsForNetwork, exists := r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] - if !exists { - rewardsForNetwork = &NetworkRewardsInfo{ - CollateralRpl: NewQuotedBigInt(0), - OracleDaoRpl: NewQuotedBigInt(0), - SmoothingPoolEth: NewQuotedBigInt(0), - } - r.rewardsFile.NetworkRewards[rewardsForNode.RewardNetwork] = rewardsForNetwork - } - rewardsForNetwork.SmoothingPoolEth.Add(&rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) - } - } - - // Set the totals - r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int = *poolStakerETH - r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Int = *nodeOpEth - r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Int = *r.smoothingPoolBalance - return nil - -} - -// Calculate the distribution of Smoothing Pool ETH to each node -func (r *treeGeneratorImpl_v8_rolling) calculateNodeRewards() (*big.Int, *big.Int, error) { - - // Get the list of cheaters - cheaters := r.getCheaters() - - // Get the latest scores from the rolling record - minipools, totalScore, attestationCount := r.rollingRecord.GetScores(cheaters) - - // If there weren't any successful attestations, everything goes to the pool stakers - if totalScore.Cmp(common.Big0) == 0 || attestationCount == 0 { - 
r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d... sending the whole smoothing pool balance to the pool stakers.", totalScore.String(), attestationCount) - return r.smoothingPoolBalance, big.NewInt(0), nil - } - - totalEthForMinipools := big.NewInt(0) - totalNodeOpShare := big.NewInt(0) - totalNodeOpShare.Mul(r.smoothingPoolBalance, totalScore) - totalNodeOpShare.Div(totalNodeOpShare, big.NewInt(int64(attestationCount))) - totalNodeOpShare.Div(totalNodeOpShare, eth.EthToWei(1)) - - r.nodeDetails = map[common.Address]*NodeSmoothingDetails{} - for _, minipool := range minipools { - // Get the node amount - nodeInfo, exists := r.nodeDetails[minipool.NodeAddress] - if !exists { - nodeInfo = &NodeSmoothingDetails{ - Minipools: []*MinipoolInfo{}, - SmoothingPoolEth: big.NewInt(0), - RewardsNetwork: r.networkState.NodeDetailsByAddress[minipool.NodeAddress].RewardNetwork.Uint64(), - } - r.nodeDetails[minipool.NodeAddress] = nodeInfo - } - nodeInfo.Minipools = append(nodeInfo.Minipools, minipool) - - // Add the minipool's score to the total node score - minipoolEth := big.NewInt(0).Set(totalNodeOpShare) - minipoolEth.Mul(minipoolEth, &minipool.AttestationScore.Int) - minipoolEth.Div(minipoolEth, totalScore) - minipool.MinipoolShare = minipoolEth - nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, minipoolEth) - } - - // Add the node amounts to the total - for _, nodeInfo := range r.nodeDetails { - totalEthForMinipools.Add(totalEthForMinipools, nodeInfo.SmoothingPoolEth) - } - - // This is how much actually goes to the pool stakers - it should ideally be equal to poolStakerShare but this accounts for any cumulative floating point errors - truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) - - // Sanity check to make sure we arrived at the correct total - delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare) - delta.Abs(delta) - if delta.Cmp(r.epsilon) == 1 { - return nil, nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String()) - } - - // Calculate the staking pool share and the node op share - poolStakerShare := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare) - - r.log.Printlnf("%s Pool staker ETH: %s (%.3f)", r.logPrefix, poolStakerShare.String(), eth.WeiToEth(poolStakerShare)) - r.log.Printlnf("%s Node Op ETH: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare)) - r.log.Printlnf("%s Calculated NO ETH: %s (error = %s wei)", r.logPrefix, totalEthForMinipools.String(), delta.String()) - r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String()) - - return truePoolStakerAmount, totalEthForMinipools, nil - -} - -// Validates that the provided network is legal -func (r *treeGeneratorImpl_v8_rolling) validateNetwork(network uint64) (bool, error) { - valid, exists := r.validNetworkCache[network] - if !exists { - var err error - valid, err = tnsettings.GetNetworkEnabled(r.rp, big.NewInt(int64(network)), r.opts) - if err != nil { - return false, err - } - r.validNetworkCache[network] = valid - } - - return valid, nil -} - -// Gets the EL header for the given interval's start block -func (r *treeGeneratorImpl_v8_rolling) getStartBlocksForInterval() (*types.Header, error) { - // Get the Beacon block for the start slot of the record - r.rewardsFile.ConsensusStartBlock = 
r.rollingRecord.StartSlot - r.rewardsFile.MinipoolPerformanceFile.ConsensusStartBlock = r.rollingRecord.StartSlot - beaconBlock, exists, err := r.bc.GetBeaconBlock(fmt.Sprint(r.rollingRecord.StartSlot)) - if err != nil { - return nil, fmt.Errorf("error verifying block from previous interval: %w", err) - } - if !exists { - return nil, fmt.Errorf("couldn't retrieve CL block from previous interval (slot %d); this likely means you checkpoint sync'd your Beacon Node and it has not backfilled to the previous interval yet so it cannot be used for tree generation", r.rollingRecord.StartSlot) - } - - // Get the EL block for that Beacon block - elBlockNumber := beaconBlock.ExecutionBlockNumber - r.rewardsFile.ExecutionStartBlock = elBlockNumber - r.rewardsFile.MinipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock - startElHeader, err := r.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) - if err != nil { - return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) - } - - return startElHeader, nil -} - -// Detect and flag any cheaters -func (r *treeGeneratorImpl_v8_rolling) getCheaters() map[common.Address]bool { - cheatingNodes := map[common.Address]bool{} - three := big.NewInt(3) - - for _, nd := range r.networkState.NodeDetails { - for _, mpd := range r.networkState.MinipoolDetailsByNode[nd.NodeAddress] { - if mpd.PenaltyCount.Cmp(three) >= 0 { - // If any minipool has 3+ penalties, ban the entire node - cheatingNodes[nd.NodeAddress] = true - break - } - } - } - - return cheatingNodes -} diff --git a/shared/services/rewards/generator-impl-v8.go b/shared/services/rewards/generator-impl-v8.go index 171758b35..84384cd1f 100644 --- a/shared/services/rewards/generator-impl-v8.go +++ b/shared/services/rewards/generator-impl-v8.go @@ -10,9 +10,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ipfs/go-cid" "github.com/rocket-pool/rocketpool-go/rewards" - "github.com/rocket-pool/rocketpool-go/rocketpool" - tnsettings "github.com/rocket-pool/rocketpool-go/settings/trustednode" rptypes "github.com/rocket-pool/rocketpool-go/types" "github.com/rocket-pool/rocketpool-go/utils/eth" rpstate "github.com/rocket-pool/rocketpool-go/utils/state" @@ -27,30 +26,31 @@ var six = big.NewInt(6) // Implementation for tree generator ruleset v8 type treeGeneratorImpl_v8 struct { - networkState *state.NetworkState - rewardsFile *RewardsFile_v3 - elSnapshotHeader *types.Header - log *log.ColorLogger - logPrefix string - rp *rocketpool.RocketPool - cfg *config.RocketPoolConfig - bc beacon.Client - opts *bind.CallOpts - nodeDetails []*NodeSmoothingDetails - smoothingPoolBalance *big.Int - intervalDutiesInfo *IntervalDutiesInfo - slotsPerEpoch uint64 - validatorIndexMap map[string]*MinipoolInfo - elStartTime time.Time - elEndTime time.Time - validNetworkCache map[uint64]bool - epsilon *big.Int - intervalSeconds *big.Int - beaconConfig beacon.Eth2Config - validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus - totalAttestationScore *big.Int - successfulAttestations uint64 - genesisTime time.Time + networkState *state.NetworkState + rewardsFile *RewardsFile_v3 + elSnapshotHeader *types.Header + log *log.ColorLogger + logPrefix string + rp RewardsExecutionClient + previousRewardsPoolAddresses []common.Address + bc RewardsBeaconClient + opts *bind.CallOpts + nodeDetails []*NodeSmoothingDetails + smoothingPoolBalance 
*big.Int + intervalDutiesInfo *IntervalDutiesInfo + slotsPerEpoch uint64 + validatorIndexMap map[string]*MinipoolInfo + elStartTime time.Time + elEndTime time.Time + validNetworkCache map[uint64]bool + epsilon *big.Int + intervalSeconds *big.Int + beaconConfig beacon.Eth2Config + validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus + totalAttestationScore *big.Int + successfulAttestations uint64 + genesisTime time.Time + invalidNetworkNodes map[common.Address]uint64 } // Create a new tree generator @@ -58,15 +58,14 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 return &treeGeneratorImpl_v8{ rewardsFile: &RewardsFile_v3{ RewardsFileHeader: &RewardsFileHeader{ - RewardsFileVersion: 3, - RulesetVersion: 8, - Index: index, - StartTime: startTime.UTC(), - EndTime: endTime.UTC(), - ConsensusEndBlock: consensusBlock, - ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - IntervalsPassed: intervalsPassed, - InvalidNetworkNodes: map[common.Address]uint64{}, + RewardsFileVersion: 3, + RulesetVersion: 8, + Index: index, + StartTime: startTime.UTC(), + EndTime: endTime.UTC(), + ConsensusEndBlock: consensusBlock, + ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), + IntervalsPassed: intervalsPassed, TotalRewards: &TotalRewards{ ProtocolDaoRpl: NewQuotedBigInt(0), TotalCollateralRpl: NewQuotedBigInt(0), @@ -77,14 +76,14 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 }, NetworkRewards: map[uint64]*NetworkRewardsInfo{}, }, - NodeRewards: map[common.Address]*NodeRewardsInfo_v3{}, - MinipoolPerformanceFile: MinipoolPerformanceFile_v3{ + NodeRewards: map[common.Address]*NodeRewardsInfo_v2{}, + MinipoolPerformanceFile: MinipoolPerformanceFile_v2{ Index: index, StartTime: startTime.UTC(), EndTime: endTime.UTC(), ConsensusEndBlock: consensusBlock, ExecutionEndBlock: elSnapshotHeader.Number.Uint64(), - MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v3{}, + MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, }, }, validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, @@ -94,6 +93,7 @@ func newTreeGeneratorImpl_v8(log *log.ColorLogger, logPrefix string, index uint6 logPrefix: logPrefix, totalAttestationScore: big.NewInt(0), networkState: state, + invalidNetworkNodes: map[common.Address]uint64{}, } } @@ -102,20 +102,20 @@ func (r *treeGeneratorImpl_v8) getRulesetVersion() uint64 { return r.rewardsFile.RulesetVersion } -func (r *treeGeneratorImpl_v8) generateTree(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (IRewardsFile, error) { +func (r *treeGeneratorImpl_v8) generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) { r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) // Provision some struct params r.rp = rp - r.cfg = cfg + r.previousRewardsPoolAddresses = previousRewardsPoolAddresses r.bc = bc r.validNetworkCache = map[uint64]bool{ 0: true, } // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) + r.rewardsFile.Network = networkName r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion @@ -157,7 +157,7 @@ func (r *treeGeneratorImpl_v8) 
generateTree(rp *rocketpool.RocketPool, cfg *conf r.updateNetworksAndTotals() // Generate the Merkle Tree - err = r.rewardsFile.generateMerkleTree() + err = r.rewardsFile.GenerateMerkleTree() if err != nil { return nil, fmt.Errorf("error generating Merkle tree: %w", err) } @@ -169,24 +169,27 @@ func (r *treeGeneratorImpl_v8) generateTree(rp *rocketpool.RocketPool, cfg *conf }) } - return r.rewardsFile, nil + return &GenerateTreeResult{ + RewardsFile: r.rewardsFile, + InvalidNetworkNodes: r.invalidNetworkNodes, + MinipoolPerformanceFile: &r.rewardsFile.MinipoolPerformanceFile, + }, nil } // Quickly calculates an approximate of the staker's share of the smoothing pool balance without processing Beacon performance // Used for approximate returns in the rETH ratio update -func (r *treeGeneratorImpl_v8) approximateStakerShareOfSmoothingPool(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (*big.Int, error) { +func (r *treeGeneratorImpl_v8) approximateStakerShareOfSmoothingPool(rp RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) { r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) r.rp = rp - r.cfg = cfg r.bc = bc r.validNetworkCache = map[uint64]bool{ 0: true, } // Set the network name - r.rewardsFile.Network = fmt.Sprint(cfg.Smartnode.Network.Value) + r.rewardsFile.Network = networkName r.rewardsFile.MinipoolPerformanceFile.Network = r.rewardsFile.Network r.rewardsFile.MinipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion r.rewardsFile.MinipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion @@ -363,11 +366,11 @@ func (r *treeGeneratorImpl_v8) calculateRplRewards() error { return err } if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeDetails.NodeAddress] = network + r.invalidNetworkNodes[nodeDetails.NodeAddress] = network network = 0 } - rewardsForNode = &NodeRewardsInfo_v3{ + rewardsForNode = &NodeRewardsInfo_v2{ RewardNetwork: network, CollateralRpl: NewQuotedBigInt(0), OracleDaoRpl: NewQuotedBigInt(0), @@ -458,11 +461,11 @@ func (r *treeGeneratorImpl_v8) calculateRplRewards() error { return err } if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[address] = network + r.invalidNetworkNodes[address] = network network = 0 } - rewardsForNode = &NodeRewardsInfo_v3{ + rewardsForNode = &NodeRewardsInfo_v2{ RewardNetwork: network, CollateralRpl: NewQuotedBigInt(0), OracleDaoRpl: NewQuotedBigInt(0), @@ -530,7 +533,7 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool) // Get the start time of this interval based on the event from the previous one //previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, r.opts) // This is immutable so querying at the head is fine and mitigates issues around calls for pruned EL state - previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, nil) + previousIntervalEvent, err := r.rp.GetRewardSnapshotEvent(r.previousRewardsPoolAddresses, r.rewardsFile.Index-1, nil) if err != nil { return err } @@ -613,11 +616,11 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool) return err } if !validNetwork { - r.rewardsFile.InvalidNetworkNodes[nodeInfo.Address] = network + r.invalidNetworkNodes[nodeInfo.Address] = network network = 0 } - rewardsForNode = &NodeRewardsInfo_v3{ + rewardsForNode = &NodeRewardsInfo_v2{ RewardNetwork: network, CollateralRpl: NewQuotedBigInt(0), OracleDaoRpl: 
NewQuotedBigInt(0), @@ -631,7 +634,7 @@ func (r *treeGeneratorImpl_v8) calculateEthRewards(checkBeaconPerformance bool) for _, minipoolInfo := range nodeInfo.Minipools { successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) - performance := &SmoothingPoolMinipoolPerformance_v3{ + performance := &SmoothingPoolMinipoolPerformance_v2{ Pubkey: minipoolInfo.ValidatorPubkey.Hex(), SuccessfulAttestations: successfulAttestations, MissedAttestations: missingAttestations, @@ -1048,6 +1051,7 @@ func (r *treeGeneratorImpl_v8) getSmoothingPoolNodeDetails() error { Minipools: []*MinipoolInfo{}, SmoothingPoolEth: big.NewInt(0), RewardsNetwork: nativeNodeDetails.RewardNetwork.Uint64(), + RplStake: nativeNodeDetails.RplStake, } nodeDetails.IsOptedIn = nativeNodeDetails.SmoothingPoolRegistrationState @@ -1102,6 +1106,12 @@ func (r *treeGeneratorImpl_v8) getSmoothingPoolNodeDetails() error { } } + // Populate the eligible borrowed ETH field for all nodes + for _, nodeDetails := range r.nodeDetails { + nnd := r.networkState.NodeDetailsByAddress[nodeDetails.Address] + nodeDetails.EligibleBorrowedEth = r.networkState.GetEligibleBorrowedEth(nnd) + } + return nil } @@ -1111,7 +1121,7 @@ func (r *treeGeneratorImpl_v8) validateNetwork(network uint64) (bool, error) { valid, exists := r.validNetworkCache[network] if !exists { var err error - valid, err = tnsettings.GetNetworkEnabled(r.rp, big.NewInt(int64(network)), r.opts) + valid, err = r.rp.GetNetworkEnabled(big.NewInt(int64(network)), r.opts) if err != nil { return false, err } @@ -1158,7 +1168,7 @@ func (r *treeGeneratorImpl_v8) getStartBlocksForInterval(previousIntervalEvent r // We are pre-merge, so get the first block after the one from the previous interval r.rewardsFile.ExecutionStartBlock = previousIntervalEvent.ExecutionBlock.Uint64() + 1 r.rewardsFile.MinipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock - startElHeader, err = r.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock))) + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock))) if err != nil { return nil, fmt.Errorf("error getting EL start block %d: %w", r.rewardsFile.ExecutionStartBlock, err) } @@ -1166,7 +1176,7 @@ func (r *treeGeneratorImpl_v8) getStartBlocksForInterval(previousIntervalEvent r // We are post-merge, so get the EL block corresponding to the BC block r.rewardsFile.ExecutionStartBlock = elBlockNumber r.rewardsFile.MinipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock - startElHeader, err = r.rp.Client.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) if err != nil { return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) } @@ -1200,3 +1210,7 @@ func (r *treeGeneratorImpl_v8) getMinipoolBondAndNodeFee(details *rpstate.Native return currentBond, currentFee } + +func (r *treeGeneratorImpl_v8) saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return saveJSONArtifacts(smartnode, treeResult, nodeTrusted) +} diff --git a/shared/services/rewards/generator-impl-v9-v10.go b/shared/services/rewards/generator-impl-v9-v10.go new file mode 100644 index 000000000..d3d7f0880 --- /dev/null +++ 
b/shared/services/rewards/generator-impl-v9-v10.go @@ -0,0 +1,1313 @@ +package rewards + +import ( + "context" + "fmt" + "math/big" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ipfs/go-cid" + "github.com/rocket-pool/rocketpool-go/rewards" + rptypes "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/rocketpool-go/utils/eth" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/config" + "github.com/rocket-pool/smartnode/shared/services/rewards/fees" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types" + sszbig "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" + "github.com/rocket-pool/smartnode/shared/services/state" + "github.com/rocket-pool/smartnode/shared/utils/log" + "golang.org/x/sync/errgroup" +) + +// Type assertion to ensure SSZFile_v1 is IRewardsFile +var _ IRewardsFile = (*ssz_types.SSZFile_v1)(nil) + +// Implementation for tree generator rulesets v9 and v10 +type treeGeneratorImpl_v9_v10 struct { + networkState *state.NetworkState + rewardsFile *ssz_types.SSZFile_v1 + elSnapshotHeader *types.Header + snapshotEnd *SnapshotEnd + log *log.ColorLogger + logPrefix string + rp RewardsExecutionClient + previousRewardsPoolAddresses []common.Address + bc RewardsBeaconClient + opts *bind.CallOpts + nodeDetails []*NodeSmoothingDetails + smoothingPoolBalance *big.Int + intervalDutiesInfo *IntervalDutiesInfo + slotsPerEpoch uint64 + validatorIndexMap map[string]*MinipoolInfo + elStartTime time.Time + elEndTime time.Time + validNetworkCache map[uint64]bool + epsilon *big.Int + intervalSeconds *big.Int + beaconConfig beacon.Eth2Config + validatorStatusMap map[rptypes.ValidatorPubkey]beacon.ValidatorStatus + totalAttestationScore *big.Int + successfulAttestations uint64 + genesisTime time.Time + invalidNetworkNodes map[common.Address]uint64 + minipoolPerformanceFile *MinipoolPerformanceFile_v2 + nodeRewards map[common.Address]*ssz_types.NodeReward + networkRewards map[ssz_types.Layer]*ssz_types.NetworkReward + + // fields for RPIP-62 bonus calculations + // Withdrawals made by a minipool's validator.
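+ // Keyed by minipool address (see processAttestationsBalancesAndWithdrawalsForInterval).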
+ minipoolWithdrawals map[common.Address]*big.Int +} + +// Create a new tree generator +func newTreeGeneratorImpl_v9_v10(rulesetVersion uint64, log *log.ColorLogger, logPrefix string, index uint64, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) *treeGeneratorImpl_v9_v10 { + return &treeGeneratorImpl_v9_v10{ + rewardsFile: &ssz_types.SSZFile_v1{ + RewardsFileVersion: 3, + RulesetVersion: rulesetVersion, + Index: index, + IntervalsPassed: intervalsPassed, + TotalRewards: &ssz_types.TotalRewards{ + ProtocolDaoRpl: sszbig.NewUint256(0), + TotalCollateralRpl: sszbig.NewUint256(0), + TotalOracleDaoRpl: sszbig.NewUint256(0), + TotalSmoothingPoolEth: sszbig.NewUint256(0), + PoolStakerSmoothingPoolEth: sszbig.NewUint256(0), + NodeOperatorSmoothingPoolEth: sszbig.NewUint256(0), + TotalNodeWeight: sszbig.NewUint256(0), + }, + NetworkRewards: ssz_types.NetworkRewards{}, + NodeRewards: ssz_types.NodeRewards{}, + }, + validatorStatusMap: map[rptypes.ValidatorPubkey]beacon.ValidatorStatus{}, + validatorIndexMap: map[string]*MinipoolInfo{}, + elSnapshotHeader: elSnapshotHeader, + snapshotEnd: snapshotEnd, + log: log, + logPrefix: logPrefix, + totalAttestationScore: big.NewInt(0), + networkState: state, + invalidNetworkNodes: map[common.Address]uint64{}, + minipoolPerformanceFile: &MinipoolPerformanceFile_v2{ + Index: index, + MinipoolPerformance: map[common.Address]*SmoothingPoolMinipoolPerformance_v2{}, + }, + nodeRewards: map[common.Address]*ssz_types.NodeReward{}, + networkRewards: map[ssz_types.Layer]*ssz_types.NetworkReward{}, + minipoolWithdrawals: map[common.Address]*big.Int{}, + } +} + +// Get the version of the ruleset used by this generator +func (r *treeGeneratorImpl_v9_v10) getRulesetVersion() uint64 { + return r.rewardsFile.RulesetVersion +} + +func (r *treeGeneratorImpl_v9_v10) generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) { + + r.log.Printlnf("%s Generating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) + + // Provision some struct params + r.rp = rp + r.previousRewardsPoolAddresses = previousRewardsPoolAddresses + r.bc = bc + r.validNetworkCache = map[uint64]bool{ + 0: true, + } + + // Set the network name + r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName) + r.minipoolPerformanceFile.Network = networkName + r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + + // Get the Beacon config + r.beaconConfig = r.networkState.BeaconConfig + r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch + r.genesisTime = time.Unix(int64(r.beaconConfig.GenesisTime), 0) + + // Set the EL client call opts + r.opts = &bind.CallOpts{ + BlockNumber: r.elSnapshotHeader.Number, + } + + r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) + + // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation + nodeCount := len(r.networkState.NodeDetails) + minipoolCount := len(r.networkState.MinipoolDetails) + if nodeCount > minipoolCount { + r.epsilon = big.NewInt(int64(nodeCount)) + } else { + r.epsilon = big.NewInt(int64(minipoolCount)) + } + + // Calculate the RPL rewards + err := r.calculateRplRewards() + if err != nil { + return nil, fmt.Errorf("error calculating RPL rewards: %w", err) + } + + // Calculate the ETH 
rewards + err = r.calculateEthRewards(true) + if err != nil { + return nil, fmt.Errorf("error calculating ETH rewards: %w", err) + } + + // Sort and assign the maps to the ssz file lists + for nodeAddress, nodeReward := range r.nodeRewards { + copy(nodeReward.Address[:], nodeAddress[:]) + r.rewardsFile.NodeRewards = append(r.rewardsFile.NodeRewards, nodeReward) + } + + for layer, networkReward := range r.networkRewards { + networkReward.Network = layer + r.rewardsFile.NetworkRewards = append(r.rewardsFile.NetworkRewards, networkReward) + } + + // Generate the Merkle Tree + err = r.rewardsFile.GenerateMerkleTree() + if err != nil { + return nil, fmt.Errorf("error generating Merkle tree: %w", err) + } + + // Sort all of the missed attestations so the files are always generated in the same state + for _, minipoolInfo := range r.minipoolPerformanceFile.MinipoolPerformance { + sort.Slice(minipoolInfo.MissingAttestationSlots, func(i, j int) bool { + return minipoolInfo.MissingAttestationSlots[i] < minipoolInfo.MissingAttestationSlots[j] + }) + } + + return &GenerateTreeResult{ + RewardsFile: r.rewardsFile, + InvalidNetworkNodes: r.invalidNetworkNodes, + MinipoolPerformanceFile: r.minipoolPerformanceFile, + }, nil + +} + +// Quickly calculates an approximation of the staker's share of the smoothing pool balance without processing Beacon performance +// Used for approximate returns in the rETH ratio update +func (r *treeGeneratorImpl_v9_v10) approximateStakerShareOfSmoothingPool(rp RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) { + r.log.Printlnf("%s Approximating tree using Ruleset v%d.", r.logPrefix, r.rewardsFile.RulesetVersion) + + r.rp = rp + r.bc = bc + r.validNetworkCache = map[uint64]bool{ + 0: true, + } + + // Set the network name + r.rewardsFile.Network, _ = ssz_types.NetworkFromString(networkName) + r.minipoolPerformanceFile.Network = networkName + r.minipoolPerformanceFile.RewardsFileVersion = r.rewardsFile.RewardsFileVersion + r.minipoolPerformanceFile.RulesetVersion = r.rewardsFile.RulesetVersion + + // Get the Beacon config + r.beaconConfig = r.networkState.BeaconConfig + r.slotsPerEpoch = r.beaconConfig.SlotsPerEpoch + r.genesisTime = time.Unix(int64(r.beaconConfig.GenesisTime), 0) + + // Set the EL client call opts + r.opts = &bind.CallOpts{ + BlockNumber: r.elSnapshotHeader.Number, + } + + r.log.Printlnf("%s Creating tree for %d nodes", r.logPrefix, len(r.networkState.NodeDetails)) + + // Get the max of node count and minipool count - this will be used for an error epsilon due to division truncation + nodeCount := len(r.networkState.NodeDetails) + minipoolCount := len(r.networkState.MinipoolDetails) + if nodeCount > minipoolCount { + r.epsilon = big.NewInt(int64(nodeCount)) + } else { + r.epsilon = big.NewInt(int64(minipoolCount)) + } + + // Calculate the ETH rewards + err := r.calculateEthRewards(false) + if err != nil { + return nil, fmt.Errorf("error calculating ETH rewards: %w", err) + } + + return r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Int, nil +} + +func (r *treeGeneratorImpl_v9_v10) calculateNodeRplRewards( + collateralRewards *big.Int, + nodeWeight *big.Int, + totalNodeWeight *big.Int, +) *big.Int { + + if nodeWeight.Sign() <= 0 { + return big.NewInt(0) + } + + // (collateralRewards * nodeWeight / totalNodeWeight) + rpip30Rewards := big.NewInt(0).Mul(collateralRewards, nodeWeight) + rpip30Rewards.Quo(rpip30Rewards, totalNodeWeight) + + return rpip30Rewards +} + +// Calculates the RPL rewards for the given interval +func 
(r *treeGeneratorImpl_v9_v10) calculateRplRewards() error { + pendingRewards := r.networkState.NetworkDetails.PendingRPLRewards + r.log.Printlnf("%s Pending RPL rewards: %s (%.3f)", r.logPrefix, pendingRewards.String(), eth.WeiToEth(pendingRewards)) + if pendingRewards.Cmp(common.Big0) == 0 { + return fmt.Errorf("there are no pending RPL rewards, so this interval cannot be used for rewards submission") + } + + // Get baseline Protocol DAO rewards + pDaoPercent := r.networkState.NetworkDetails.ProtocolDaoRewardsPercent + pDaoRewards := big.NewInt(0) + pDaoRewards.Mul(pendingRewards, pDaoPercent) + pDaoRewards.Div(pDaoRewards, oneEth) + r.log.Printlnf("%s Expected Protocol DAO rewards: %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(pDaoRewards)) + + // Get node operator rewards + nodeOpPercent := r.networkState.NetworkDetails.NodeOperatorRewardsPercent + totalNodeRewards := big.NewInt(0) + totalNodeRewards.Mul(pendingRewards, nodeOpPercent) + totalNodeRewards.Div(totalNodeRewards, oneEth) + r.log.Printlnf("%s Approx. total collateral RPL rewards: %s (%.3f)", r.logPrefix, totalNodeRewards.String(), eth.WeiToEth(totalNodeRewards)) + + // Calculate the RPIP-30 weight of each node, scaling by their participation in this interval + nodeWeights, totalNodeWeight, err := r.networkState.CalculateNodeWeights() + if err != nil { + return fmt.Errorf("error calculating node weights: %w", err) + } + + // Operate normally if any node has rewards + if totalNodeWeight.Sign() > 0 { + // Make sure to record totalNodeWeight in the rewards file + r.rewardsFile.TotalRewards.TotalNodeWeight.Set(totalNodeWeight) + + r.log.Printlnf("%s Calculating individual collateral rewards...", r.logPrefix) + for i, nodeDetails := range r.networkState.NodeDetails { + // Get how much RPL goes to this node + nodeRplRewards := r.calculateNodeRplRewards( + totalNodeRewards, + nodeWeights[nodeDetails.NodeAddress], + totalNodeWeight, + ) + + // If there are pending rewards, add them to the map + if nodeRplRewards.Sign() == 1 { + rewardsForNode, exists := r.nodeRewards[nodeDetails.NodeAddress] + if !exists { + // Get the network the rewards should go to + network := r.networkState.NodeDetails[i].RewardNetwork.Uint64() + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[nodeDetails.NodeAddress] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(nodeDetails.NodeAddress.Bytes()), + ) + r.nodeRewards[nodeDetails.NodeAddress] = rewardsForNode + } + rewardsForNode.CollateralRpl.Add(rewardsForNode.CollateralRpl.Int, nodeRplRewards) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.CollateralRpl.Int.Add(rewardsForNetwork.CollateralRpl.Int, nodeRplRewards) + } + } + + // Sanity check to make sure we arrived at the correct total + delta := big.NewInt(0) + totalCalculatedNodeRewards := big.NewInt(0) + for _, networkRewards := range r.networkRewards { + totalCalculatedNodeRewards.Add(totalCalculatedNodeRewards, networkRewards.CollateralRpl.Int) + } + delta.Sub(totalNodeRewards, totalCalculatedNodeRewards).Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return fmt.Errorf("error calculating collateral RPL: total was %s, but expected %s; error was too large", totalCalculatedNodeRewards.String(), 
totalNodeRewards.String()) + } + r.rewardsFile.TotalRewards.TotalCollateralRpl.Int.Set(totalCalculatedNodeRewards) + r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedNodeRewards.String(), delta.String()) + pDaoRewards.Sub(pendingRewards, totalCalculatedNodeRewards) + } else { + // In this situation, none of the nodes in the network had eligible rewards so send it all to the pDAO + pDaoRewards.Add(pDaoRewards, totalNodeRewards) + r.log.Printlnf("%s None of the nodes were eligible for collateral rewards, sending everything to the pDAO; now at %s (%.3f)", r.logPrefix, pDaoRewards.String(), eth.WeiToEth(pDaoRewards)) + } + + // Handle Oracle DAO rewards + oDaoPercent := r.networkState.NetworkDetails.TrustedNodeOperatorRewardsPercent + totalODaoRewards := big.NewInt(0) + totalODaoRewards.Mul(pendingRewards, oDaoPercent) + totalODaoRewards.Div(totalODaoRewards, oneEth) + r.log.Printlnf("%s Total Oracle DAO RPL rewards: %s (%.3f)", r.logPrefix, totalODaoRewards.String(), eth.WeiToEth(totalODaoRewards)) + + oDaoDetails := r.networkState.OracleDaoMemberDetails + + // Calculate the true effective time of each oDAO node based on their participation in this interval + totalODaoNodeTime := big.NewInt(0) + trueODaoNodeTimes := map[common.Address]*big.Int{} + for _, details := range oDaoDetails { + // Get the timestamp of the node joining the oDAO + joinTime := details.JoinedTime + + // Get the actual effective time, scaled based on participation + intervalDuration := r.networkState.NetworkDetails.IntervalDuration + intervalDurationBig := big.NewInt(int64(intervalDuration.Seconds())) + participationTime := big.NewInt(0).Set(intervalDurationBig) + snapshotBlockTime := time.Unix(int64(r.elSnapshotHeader.Time), 0) + eligibleDuration := snapshotBlockTime.Sub(joinTime) + if eligibleDuration < intervalDuration { + participationTime = big.NewInt(int64(eligibleDuration.Seconds())) + } + trueODaoNodeTimes[details.Address] = participationTime + + // Add it to the total + totalODaoNodeTime.Add(totalODaoNodeTime, participationTime) + } + + for _, details := range oDaoDetails { + address := details.Address + + // Calculate the oDAO rewards for the node: (participation time) * (total oDAO rewards) / (total participation time) + individualOdaoRewards := big.NewInt(0) + individualOdaoRewards.Mul(trueODaoNodeTimes[address], totalODaoRewards) + individualOdaoRewards.Div(individualOdaoRewards, totalODaoNodeTime) + + rewardsForNode, exists := r.nodeRewards[address] + if !exists { + // Get the network the rewards should go to + network := r.networkState.NodeDetailsByAddress[address].RewardNetwork.Uint64() + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(address.Bytes()), + ) + r.nodeRewards[address] = rewardsForNode + + } + rewardsForNode.OracleDaoRpl.Add(rewardsForNode.OracleDaoRpl.Int, individualOdaoRewards) + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.OracleDaoRpl.Add(rewardsForNetwork.OracleDaoRpl.Int, individualOdaoRewards) + } + + // Sanity check to make sure we arrived at the correct total + 
totalCalculatedOdaoRewards := big.NewInt(0) + delta := big.NewInt(0) + for _, networkRewards := range r.networkRewards { + totalCalculatedOdaoRewards.Add(totalCalculatedOdaoRewards, networkRewards.OracleDaoRpl.Int) + } + delta.Sub(totalODaoRewards, totalCalculatedOdaoRewards).Abs(delta) + if delta.Cmp(r.epsilon) == 1 { + return fmt.Errorf("error calculating ODao RPL: total was %s, but expected %s; error was too large", totalCalculatedOdaoRewards.String(), totalODaoRewards.String()) + } + r.rewardsFile.TotalRewards.TotalOracleDaoRpl.Int.Set(totalCalculatedOdaoRewards) + r.log.Printlnf("%s Calculated rewards: %s (error = %s wei)", r.logPrefix, totalCalculatedOdaoRewards.String(), delta.String()) + + // Get actual protocol DAO rewards + pDaoRewards.Sub(pDaoRewards, totalCalculatedOdaoRewards) + r.rewardsFile.TotalRewards.ProtocolDaoRpl = sszbig.NewUint256(0) + r.rewardsFile.TotalRewards.ProtocolDaoRpl.Set(pDaoRewards) + r.log.Printlnf("%s Actual Protocol DAO rewards: %s to account for truncation", r.logPrefix, pDaoRewards.String()) + + // Print total node weight + r.log.Printlnf("%s Total Node Weight: %s", r.logPrefix, totalNodeWeight) + + return nil + +} + +// Calculates the ETH rewards for the given interval +func (r *treeGeneratorImpl_v9_v10) calculateEthRewards(checkBeaconPerformance bool) error { + + // Get the Smoothing Pool contract's balance + r.smoothingPoolBalance = r.networkState.NetworkDetails.SmoothingPoolBalance + r.log.Printlnf("%s Smoothing Pool Balance: %s (%.3f)", r.logPrefix, r.smoothingPoolBalance.String(), eth.WeiToEth(r.smoothingPoolBalance)) + + // Ignore the ETH calculation if there are no rewards + if r.smoothingPoolBalance.Cmp(common.Big0) == 0 { + return nil + } + + if r.rewardsFile.Index == 0 { + // This is the first interval, Smoothing Pool rewards are ignored on the first interval since it doesn't have a discrete start time + return nil + } + + // Get the start time of this interval based on the event from the previous one + //previousIntervalEvent, err := GetRewardSnapshotEvent(r.rp, r.cfg, r.rewardsFile.Index-1, r.opts) // This is immutable so querying at the head is fine and mitigates issues around calls for pruned EL state + previousIntervalEvent, err := r.rp.GetRewardSnapshotEvent(r.previousRewardsPoolAddresses, r.rewardsFile.Index-1, r.opts) + if err != nil { + return err + } + startElBlockHeader, err := r.getBlocksAndTimesForInterval(previousIntervalEvent) + if err != nil { + return err + } + + r.elStartTime = time.Unix(int64(startElBlockHeader.Time), 0) + r.elEndTime = time.Unix(int64(r.elSnapshotHeader.Time), 0) + r.intervalSeconds = big.NewInt(int64(r.elEndTime.Sub(r.elStartTime) / time.Second)) + + // Get the details for nodes eligible for Smoothing Pool rewards + // This should be all of the eth1 calls, so do them all at the start of Smoothing Pool calculation to prevent the need for an archive node during normal operations + err = r.getSmoothingPoolNodeDetails() + if err != nil { + return err + } + eligible := 0 + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.IsEligible { + eligible++ + } + } + r.log.Printlnf("%s %d / %d nodes were eligible for Smoothing Pool rewards", r.logPrefix, eligible, len(r.nodeDetails)) + + // Process the attestation performance for each minipool during this interval + r.intervalDutiesInfo = &IntervalDutiesInfo{ + Index: r.rewardsFile.Index, + Slots: map[uint64]*SlotInfo{}, + } + if checkBeaconPerformance { + err = r.processAttestationsBalancesAndWithdrawalsForInterval() + if err != nil { + return err + } + } else { 
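+ // (This approximation branch is reached from approximateStakerShareOfSmoothingPool, which calls calculateEthRewards(false).)
+ // Worked example of the per-attestation score computed below: an 8 ETH bond at a 14% fee scores 0.14 + (8/32)*(1-0.14) = 0.355, in 1e18-scaled units.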
+ // Attestation processing is disabled, just give each minipool 1 good attestation and complete slot activity so they're all scored the same + // Used for approximating rETH's share during balances calculation + validatorReq := big.NewInt(0).Set(thirtyTwoEth) + for _, nodeInfo := range r.nodeDetails { + // Check if the node is currently opted in for simplicity + if nodeInfo.IsEligible && nodeInfo.IsOptedIn && r.elEndTime.After(nodeInfo.OptInTime) { + eligibleBorrowedEth := nodeInfo.EligibleBorrowedEth + _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeInfo.RplStake) + for _, minipool := range nodeInfo.Minipools { + minipool.CompletedAttestations = map[uint64]bool{0: true} + + // Make up an attestation + details := r.networkState.MinipoolDetailsByAddress[minipool.Address] + bond, fee := details.GetMinipoolBondAndNodeFee(r.elEndTime) + if r.rewardsFile.RulesetVersion >= 10 { + fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + } + minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee + minipoolScore.Mul(minipoolScore, bond) // Multiply by bond + minipoolScore.Div(minipoolScore, validatorReq) // Divide by 32 to get the bond as a fraction of a total validator + minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + + // Add it to the minipool's score and the total score + minipool.AttestationScore.Add(&minipool.AttestationScore.Int, minipoolScore) + r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore) + + r.successfulAttestations++ + } + } + } + } + + // Determine how much ETH each node gets and how much the pool stakers get + poolStakerETH, nodeOpEth, bonusScalar, err := r.calculateNodeRewards() + if err != nil { + return err + } + if r.rewardsFile.RulesetVersion >= 10 { + r.minipoolPerformanceFile.BonusScalar = QuotedBigIntFromBigInt(bonusScalar) + } + + // Update the rewards maps + for _, nodeInfo := range r.nodeDetails { + if nodeInfo.IsEligible && nodeInfo.SmoothingPoolEth.Cmp(common.Big0) > 0 { + rewardsForNode, exists := r.nodeRewards[nodeInfo.Address] + if !exists { + network := nodeInfo.RewardsNetwork + validNetwork, err := r.validateNetwork(network) + if err != nil { + return err + } + if !validNetwork { + r.invalidNetworkNodes[nodeInfo.Address] = network + network = 0 + } + + rewardsForNode = ssz_types.NewNodeReward( + network, + ssz_types.AddressFromBytes(nodeInfo.Address.Bytes()), + ) + r.nodeRewards[nodeInfo.Address] = rewardsForNode + } + rewardsForNode.SmoothingPoolEth.Add(rewardsForNode.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + + // Add minipool rewards to the JSON + for _, minipoolInfo := range nodeInfo.Minipools { + successfulAttestations := uint64(len(minipoolInfo.CompletedAttestations)) + missingAttestations := uint64(len(minipoolInfo.MissingAttestationSlots)) + performance := &SmoothingPoolMinipoolPerformance_v2{ + Pubkey: minipoolInfo.ValidatorPubkey.Hex(), + SuccessfulAttestations: successfulAttestations, + MissedAttestations: missingAttestations, + AttestationScore: minipoolInfo.AttestationScore, + EthEarned: QuotedBigIntFromBigInt(minipoolInfo.MinipoolShare), + BonusEthEarned: QuotedBigIntFromBigInt(minipoolInfo.MinipoolBonus), + ConsensusIncome: minipoolInfo.ConsensusIncome, + EffectiveCommission: QuotedBigIntFromBigInt(minipoolInfo.TotalFee), + MissingAttestationSlots: []uint64{}, + } + if successfulAttestations+missingAttestations == 0 { + // Don't include minipools that have zero attestations + continue + } + for slot := range 
minipoolInfo.MissingAttestationSlots { + performance.MissingAttestationSlots = append(performance.MissingAttestationSlots, slot) + } + r.minipoolPerformanceFile.MinipoolPerformance[minipoolInfo.Address] = performance + } + + // Add the rewards to the running total for the specified network + rewardsForNetwork, exists := r.networkRewards[rewardsForNode.Network] + if !exists { + rewardsForNetwork = ssz_types.NewNetworkReward(rewardsForNode.Network) + r.networkRewards[rewardsForNode.Network] = rewardsForNetwork + } + rewardsForNetwork.SmoothingPoolEth.Add(rewardsForNetwork.SmoothingPoolEth.Int, nodeInfo.SmoothingPoolEth) + } + } + + // Set the totals + r.rewardsFile.TotalRewards.PoolStakerSmoothingPoolEth.Set(poolStakerETH) + r.rewardsFile.TotalRewards.NodeOperatorSmoothingPoolEth.Set(nodeOpEth) + r.rewardsFile.TotalRewards.TotalSmoothingPoolEth.Set(r.smoothingPoolBalance) + return nil + +} + +var oneEth = big.NewInt(1000000000000000000) +var eightEth = big.NewInt(0).Mul(oneEth, big.NewInt(8)) +var fourteenPercentEth = big.NewInt(14e16) +var thirtyTwoEth = big.NewInt(0).Mul(oneEth, big.NewInt(32)) + +func (r *treeGeneratorImpl_v9_v10) calculateNodeBonuses() (*big.Int, error) { + totalConsensusBonus := big.NewInt(0) + for _, nsd := range r.nodeDetails { + if !nsd.IsEligible { + continue + } + + nodeDetails := r.networkState.NodeDetailsByAddress[nsd.Address] + eligible, _, eligibleEnd := nodeDetails.IsEligibleForBonuses(r.elStartTime, r.elEndTime) + if !eligible { + continue + } + + // Get the nodeDetails from the network state + eligibleBorrowedEth := nsd.EligibleBorrowedEth + _, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nsd.RplStake) + for _, mpd := range nsd.Minipools { + mpi := r.networkState.MinipoolDetailsByAddress[mpd.Address] + if !mpi.IsEligibleForBonuses(eligibleEnd) { + continue + } + bond, fee := mpi.GetMinipoolBondAndNodeFee(eligibleEnd) + feeWithBonus := fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + if fee.Cmp(feeWithBonus) >= 0 { + // This minipool won't get any bonuses, so skip it + continue + } + // This minipool will get a bonus + // It is safe to populate the optional fields from here on. 
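+			// The bonus below is computed in 18-decimal fixed point as:
+			//   bonusFee   = feeWithBonus - originalNodeFee
+			//   bonusShare = bonusFee * (32 ETH - bond) / 32 ETH
+			//   bonus      = consensusIncome * bonusShare / 1e18
+			// e.g. a 10% fee boosted to 14% on an 8 ETH bond gives
+			// bonusShare = 0.04 * 24/32 = 0.03, i.e. 3% of consensus income.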
+
+			fee = feeWithBonus
+			// Save fee as totalFee for the Minipool
+			mpd.TotalFee = fee
+
+			// Total fee for a minipool with a bonus shall never exceed 14%
+			if fee.Cmp(fourteenPercentEth) > 0 {
+				r.log.Printlnf("WARNING: Minipool %s has a fee of %s, which is greater than the maximum allowed of 14%%", mpd.Address.Hex(), fee.String())
+				r.log.Printlnf("WARNING: Aborting.")
+				return nil, fmt.Errorf("minipool %s has a fee of %s, which is greater than the maximum allowed of 14%%", mpd.Address.Hex(), fee.String())
+			}
+			bonusFee := big.NewInt(0).Set(fee)
+			bonusFee.Sub(bonusFee, mpi.NodeFee)
+			withdrawalTotal := r.minipoolWithdrawals[mpd.Address]
+			if withdrawalTotal == nil {
+				withdrawalTotal = big.NewInt(0)
+			}
+			consensusIncome := big.NewInt(0).Set(withdrawalTotal)
+			mpd.ConsensusIncome = &QuotedBigInt{Int: *(big.NewInt(0).Set(consensusIncome))}
+			bonusShare := bonusFee.Mul(bonusFee, big.NewInt(0).Sub(thirtyTwoEth, mpi.NodeDepositBalance))
+			bonusShare.Div(bonusShare, thirtyTwoEth)
+			minipoolBonus := consensusIncome.Mul(consensusIncome, bonusShare)
+			minipoolBonus.Div(minipoolBonus, oneEth)
+			if minipoolBonus.Sign() == -1 {
+				minipoolBonus = big.NewInt(0)
+			}
+			mpd.MinipoolBonus = minipoolBonus
+			totalConsensusBonus.Add(totalConsensusBonus, minipoolBonus)
+			nsd.BonusEth.Add(nsd.BonusEth, minipoolBonus)
+		}
+	}
+	return totalConsensusBonus, nil
+}
+
+// Calculate the distribution of Smoothing Pool ETH to each node
+func (r *treeGeneratorImpl_v9_v10) calculateNodeRewards() (*big.Int, *big.Int, *big.Int, error) {
+	var err error
+	bonusScalar := big.NewInt(0).Set(oneEth)
+
+	// If there weren't any successful attestations, everything goes to the pool stakers
+	if r.totalAttestationScore.Cmp(common.Big0) == 0 || r.successfulAttestations == 0 {
+		r.log.Printlnf("WARNING: Total attestation score = %s, successful attestations = %d...
sending the whole smoothing pool balance to the pool stakers.", r.totalAttestationScore.String(), r.successfulAttestations) + return r.smoothingPoolBalance, big.NewInt(0), bonusScalar, nil + } + + // Calculate the minipool bonuses + isEligibleInterval := true // TODO - check on-chain for saturn 1 + var totalConsensusBonus *big.Int + if r.rewardsFile.RulesetVersion >= 10 && isEligibleInterval { + totalConsensusBonus, err = r.calculateNodeBonuses() + if err != nil { + return nil, nil, nil, err + } + } + + totalEthForMinipools := big.NewInt(0) + totalNodeOpShare := big.NewInt(0) + totalNodeOpShare.Mul(r.smoothingPoolBalance, r.totalAttestationScore) + totalNodeOpShare.Div(totalNodeOpShare, big.NewInt(int64(r.successfulAttestations))) + totalNodeOpShare.Div(totalNodeOpShare, oneEth) + + for _, nodeInfo := range r.nodeDetails { + nodeInfo.SmoothingPoolEth = big.NewInt(0) + if !nodeInfo.IsEligible { + continue + } + for _, minipool := range nodeInfo.Minipools { + if len(minipool.CompletedAttestations)+len(minipool.MissingAttestationSlots) == 0 || !minipool.WasActive { + // Ignore minipools that weren't active for the interval + minipool.WasActive = false + minipool.MinipoolShare = big.NewInt(0) + continue + } + + minipoolEth := big.NewInt(0).Set(totalNodeOpShare) + minipoolEth.Mul(minipoolEth, &minipool.AttestationScore.Int) + minipoolEth.Div(minipoolEth, r.totalAttestationScore) + minipool.MinipoolShare = minipoolEth + nodeInfo.SmoothingPoolEth.Add(nodeInfo.SmoothingPoolEth, minipoolEth) + } + totalEthForMinipools.Add(totalEthForMinipools, nodeInfo.SmoothingPoolEth) + } + + if r.rewardsFile.RulesetVersion >= 10 { + remainingBalance := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools) + if remainingBalance.Cmp(totalConsensusBonus) < 0 { + r.log.Printlnf("WARNING: Remaining balance is less than total consensus bonus... 
Balance = %s, total consensus bonus = %s", remainingBalance.String(), totalConsensusBonus.String())
+		// Scale bonuses down to fit the remaining balance
+		bonusScalar.Div(big.NewInt(0).Mul(remainingBalance, oneEth), totalConsensusBonus)
+		for _, nsd := range r.nodeDetails {
+			nsd.BonusEth.Mul(nsd.BonusEth, remainingBalance)
+			nsd.BonusEth.Div(nsd.BonusEth, totalConsensusBonus)
+			// Calculate the reduced bonus for each minipool
+			// Because of integer division, this will be less than the actual bonus by up to 1 wei
+			for _, mpd := range nsd.Minipools {
+				if mpd.MinipoolBonus == nil {
+					continue
+				}
+				mpd.MinipoolBonus.Mul(mpd.MinipoolBonus, remainingBalance)
+				mpd.MinipoolBonus.Div(mpd.MinipoolBonus, totalConsensusBonus)
+			}
+		}
+	}
+	}
+
+	// Sanity check the totalNodeOpShare before bonuses are awarded
+	delta := big.NewInt(0).Sub(totalEthForMinipools, totalNodeOpShare)
+	delta.Abs(delta)
+	if delta.Cmp(r.epsilon) == 1 {
+		return nil, nil, nil, fmt.Errorf("error calculating smoothing pool ETH: total was %s, but expected %s; error was too large (%s wei)", totalEthForMinipools.String(), totalNodeOpShare.String(), delta.String())
+	}
+
+	// Finally, award the bonuses
+	if r.rewardsFile.RulesetVersion >= 10 {
+		for _, nsd := range r.nodeDetails {
+			nsd.SmoothingPoolEth.Add(nsd.SmoothingPoolEth, nsd.BonusEth)
+			totalEthForMinipools.Add(totalEthForMinipools, nsd.BonusEth)
+		}
+	}
+
+	// This is how much actually goes to the pool stakers - it should ideally be equal to poolStakerShareBeforeBonuses, but this accounts for any cumulative truncation error from the integer division above
+	truePoolStakerAmount := big.NewInt(0).Sub(r.smoothingPoolBalance, totalEthForMinipools)
+
+	// Calculate the staking pool share and the node op share
+	poolStakerShareBeforeBonuses := big.NewInt(0).Sub(r.smoothingPoolBalance, totalNodeOpShare)
+
+	r.log.Printlnf("%s Pool staker ETH before bonuses: %s (%.3f)", r.logPrefix, poolStakerShareBeforeBonuses.String(), eth.WeiToEth(poolStakerShareBeforeBonuses))
+	r.log.Printlnf("%s Pool staker ETH after bonuses: %s (%.3f)", r.logPrefix, truePoolStakerAmount.String(), eth.WeiToEth(truePoolStakerAmount))
+	r.log.Printlnf("%s Node Op ETH before bonuses: %s (%.3f)", r.logPrefix, totalNodeOpShare.String(), eth.WeiToEth(totalNodeOpShare))
+	r.log.Printlnf("%s Node Op ETH after bonuses: %s (%.3f)", r.logPrefix, totalEthForMinipools.String(), eth.WeiToEth(totalEthForMinipools))
+	r.log.Printlnf("%s (error = %s wei)", r.logPrefix, delta.String())
+	r.log.Printlnf("%s Adjusting pool staker ETH to %s to account for truncation", r.logPrefix, truePoolStakerAmount.String())
+
+	return truePoolStakerAmount, totalEthForMinipools, bonusScalar, nil
+
+}
+
+// Processes the attestations, balances, and withdrawals for a range of epochs
+func (r *treeGeneratorImpl_v9_v10) processAttestationsBalancesAndWithdrawalsForInterval() error {
+
+	startEpoch := r.rewardsFile.ConsensusStartBlock / r.beaconConfig.SlotsPerEpoch
+	endEpoch := r.rewardsFile.ConsensusEndBlock / r.beaconConfig.SlotsPerEpoch
+
+	// Determine the validator indices of each minipool
+	err := r.createMinipoolIndexMap()
+	if err != nil {
+		return err
+	}
+
+	// Check all of the attestations for each epoch
+	r.log.Printlnf("%s Checking participation of %d minipools for epochs %d to %d", r.logPrefix, len(r.validatorIndexMap), startEpoch, endEpoch)
+	r.log.Printlnf("%s NOTE: this will take a long time, progress is reported every 100 epochs", r.logPrefix)
+
+	epochsDone := 0
+	reportStartTime := time.Now()
+	for epoch := startEpoch; epoch < endEpoch+1; epoch++ {
+		if epochsDone == 100 {
+			timeTaken := time.Since(reportStartTime)
+			r.log.Printlnf("%s On Epoch %d of %d (%.2f%%)... (%s so far)", r.logPrefix, epoch, endEpoch, float64(epoch-startEpoch)/float64(endEpoch-startEpoch)*100.0, timeTaken)
+			epochsDone = 0
+		}
+
+		err := r.processEpoch(true, epoch)
+		if err != nil {
+			return err
+		}
+
+		epochsDone++
+	}
+
+	// Check the epoch after the end of the interval for any lingering attestations
+	epoch := endEpoch + 1
+	err = r.processEpoch(false, epoch)
+	if err != nil {
+		return err
+	}
+
+	r.log.Printlnf("%s Finished participation check (total time = %s)", r.logPrefix, time.Since(reportStartTime))
+	return nil
+
+}
+
+// Process an epoch, optionally getting the duties for all eligible minipools in it and checking each one's attestation performance
+func (r *treeGeneratorImpl_v9_v10) processEpoch(duringInterval bool, epoch uint64) error {
+
+	// Get the committee info and attestation records for this epoch
+	var committeeData beacon.Committees
+	attestationsPerSlot := make([][]beacon.AttestationInfo, r.slotsPerEpoch)
+	var wg errgroup.Group
+
+	if duringInterval {
+		wg.Go(func() error {
+			var err error
+			committeeData, err = r.bc.GetCommitteesForEpoch(&epoch)
+			return err
+		})
+	}
+
+	withdrawalsLock := &sync.Mutex{}
+	for i := uint64(0); i < r.slotsPerEpoch; i++ {
+		// Get the beacon block for this slot
+		i := i
+		slot := epoch*r.slotsPerEpoch + i
+		slotTime := r.networkState.BeaconConfig.GetSlotTime(slot)
+		wg.Go(func() error {
+			beaconBlock, found, err := r.bc.GetBeaconBlock(fmt.Sprint(slot))
+			if err != nil {
+				return err
+			}
+			if found {
+				attestationsPerSlot[i] = beaconBlock.Attestations
+			}
+
+			// If we don't need withdrawal amounts (ruleset < 10, or this slot is
+			// outside the interval), return early
+			if r.rewardsFile.RulesetVersion < 10 || !duringInterval {
+				return nil
+			}
+
+			for _, withdrawal := range beaconBlock.Withdrawals {
+				// Ignore non-RP validators
+				mpi, exists := r.validatorIndexMap[withdrawal.ValidatorIndex]
+				if !exists {
+					continue
+				}
+				nnd := r.networkState.NodeDetailsByAddress[mpi.NodeAddress]
+				nmd := r.networkState.MinipoolDetailsByAddress[mpi.Address]
+
+				// Check that the node is opted into the SP during this slot
+				if !nnd.WasOptedInAt(slotTime) {
+					continue
+				}
+
+				// Check that the minipool's bond is eligible for bonuses at this slot
+				if eligible := nmd.IsEligibleForBonuses(slotTime); !eligible {
+					continue
+				}
+
+				// If the withdrawal is in or after the minipool's withdrawable epoch, adjust it.
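+				// Full withdrawals also return the 32 ETH principal, so subtract it
+				// and clamp at zero; only the rewards portion counts as consensus
+				// income. Partial (skimmed) withdrawals before the withdrawable
+				// epoch are counted in full.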
+				withdrawalAmount := withdrawal.Amount
+				validatorInfo := r.networkState.ValidatorDetails[mpi.ValidatorPubkey]
+				if slot >= r.networkState.BeaconConfig.FirstSlotOfEpoch(validatorInfo.WithdrawableEpoch) {
+					// Subtract 32 ETH from the withdrawal amount
+					withdrawalAmount = big.NewInt(0).Sub(withdrawalAmount, thirtyTwoEth)
+					// max(withdrawalAmount, 0)
+					if withdrawalAmount.Sign() < 0 {
+						withdrawalAmount.SetInt64(0)
+					}
+				}
+
+				// Create the minipool's withdrawal sum big.Int if it doesn't exist
+				withdrawalsLock.Lock()
+				if r.minipoolWithdrawals[mpi.Address] == nil {
+					r.minipoolWithdrawals[mpi.Address] = big.NewInt(0)
+				}
+				// Add the withdrawal amount
+				r.minipoolWithdrawals[mpi.Address].Add(r.minipoolWithdrawals[mpi.Address], withdrawalAmount)
+				withdrawalsLock.Unlock()
+			}
+			return nil
+		})
+	}
+	err := wg.Wait()
+	// Return preallocated memory to the pool if it exists
+	if committeeData != nil {
+		defer committeeData.Release()
+	}
+	if err != nil {
+		return fmt.Errorf("error getting committee and attestation records for epoch %d: %w", epoch, err)
+	}
+
+	if duringInterval {
+		// Get all of the expected duties for the epoch
+		err = r.getDutiesForEpoch(committeeData)
+		if err != nil {
+			return fmt.Errorf("error getting duties for epoch %d: %w", epoch, err)
+		}
+	}
+
+	// Process all of the slots in the epoch
+	for i := uint64(0); i < r.slotsPerEpoch; i++ {
+		inclusionSlot := epoch*r.slotsPerEpoch + i
+		attestations := attestationsPerSlot[i]
+		if len(attestations) > 0 {
+			r.checkAttestations(attestations, inclusionSlot)
+		}
+	}
+
+	return nil
+
+}
+
+func (r *treeGeneratorImpl_v9_v10) checkAttestations(attestations []beacon.AttestationInfo, inclusionSlot uint64) error {
+
+	// Go through the attestations for the block
+	for _, attestation := range attestations {
+		// Get the RP committees for this attestation's slot and index
+		slotInfo, exists := r.intervalDutiesInfo.Slots[attestation.SlotIndex]
+		if !exists {
+			continue
+		}
+		// Ignore attestations delayed by more than 32 slots
+		if inclusionSlot-attestation.SlotIndex > r.beaconConfig.SlotsPerEpoch {
+			continue
+		}
+		rpCommittee, exists := slotInfo.Committees[attestation.CommitteeIndex]
+		if !exists {
+			continue
+		}
+		blockTime := r.genesisTime.Add(time.Second * time.Duration(r.networkState.BeaconConfig.SecondsPerSlot*attestation.SlotIndex))
+
+		// Check if each RP validator attested successfully
+		for position, validator := range rpCommittee.Positions {
+			if !attestation.AggregationBits.BitAt(uint64(position)) {
+				continue
+			}
+
+			// This was seen, so remove it from the missing attestations and add it to the completed ones
+			delete(rpCommittee.Positions, position)
+			if len(rpCommittee.Positions) == 0 {
+				delete(slotInfo.Committees, attestation.CommitteeIndex)
+			}
+			if len(slotInfo.Committees) == 0 {
+				delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex)
+			}
+			delete(validator.MissingAttestationSlots, attestation.SlotIndex)
+
+			// Check if this minipool was opted into the SP for this block
+			nodeDetails := r.nodeDetails[validator.NodeIndex]
+			if blockTime.Before(nodeDetails.OptInTime) || blockTime.After(nodeDetails.OptOutTime) {
+				// Not opted in
+				continue
+			}
+
+			eligibleBorrowedEth := nodeDetails.EligibleBorrowedEth
+			_, percentOfBorrowedEth := r.networkState.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake)
+
+			// Mark this duty as completed
+			validator.CompletedAttestations[attestation.SlotIndex] = true
+
+			// Get the pseudoscore for this attestation
+			details := r.networkState.MinipoolDetailsByAddress[validator.Address]
+			bond, fee := details.GetMinipoolBondAndNodeFee(blockTime)
+
+			if r.rewardsFile.RulesetVersion >= 10 {
+				fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth)
+			}
+
+			minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee
+			minipoolScore.Mul(minipoolScore, bond)          // Multiply by bond
+			minipoolScore.Div(minipoolScore, thirtyTwoEth)  // Divide by 32 to get the bond as a fraction of a total validator
+			minipoolScore.Add(minipoolScore, fee)           // Total = fee + (bond/32)(1 - fee)
+
+			// Add it to the minipool's score and the total score
+			validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore)
+			r.totalAttestationScore.Add(r.totalAttestationScore, minipoolScore)
+			r.successfulAttestations++
+		}
+	}
+
+	return nil
+
+}
+
+// Maps out the attestation duties for the given epoch
+func (r *treeGeneratorImpl_v9_v10) getDutiesForEpoch(committees beacon.Committees) error {
+
+	// Crawl the committees
+	for idx := 0; idx < committees.Count(); idx++ {
+		slotIndex := committees.Slot(idx)
+		if slotIndex < r.rewardsFile.ConsensusStartBlock || slotIndex > r.rewardsFile.ConsensusEndBlock {
+			// Ignore slots that are out of bounds
+			continue
+		}
+		blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*slotIndex))
+		committeeIndex := committees.Index(idx)
+
+		// Check if there are any RP validators in this committee
+		rpValidators := map[int]*MinipoolInfo{}
+		for position, validator := range committees.Validators(idx) {
+			minipoolInfo, exists := r.validatorIndexMap[validator]
+			if !exists {
+				// This isn't an RP validator, so ignore it
+				continue
+			}
+
+			// Check if this minipool was opted into the SP for this block
+			nodeDetails := r.networkState.NodeDetailsByAddress[minipoolInfo.NodeAddress]
+			isOptedIn := nodeDetails.SmoothingPoolRegistrationState
+			spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0)
+			if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it
+				(!isOptedIn && spRegistrationTime.Sub(blockTime) < 0) { // If this block occurred after the node opted out, ignore it
+				continue
+			}
+
+			// Check if this minipool was in the `staking` state during this time
+			mpd := r.networkState.MinipoolDetailsByAddress[minipoolInfo.Address]
+			statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0)
+			if mpd.Status != rptypes.Staking || blockTime.Sub(statusChangeTime) < 0 {
+				continue
+			}
+
+			// This was a legal RP validator opted into the SP during this slot so add it
+			rpValidators[position] = minipoolInfo
+			minipoolInfo.MissingAttestationSlots[slotIndex] = true
+		}
+
+		// If there are some RP validators, add this committee to the map
+		if len(rpValidators) > 0 {
+			slotInfo, exists := r.intervalDutiesInfo.Slots[slotIndex]
+			if !exists {
+				slotInfo = &SlotInfo{
+					Index:      slotIndex,
+					Committees: map[uint64]*CommitteeInfo{},
+				}
+				r.intervalDutiesInfo.Slots[slotIndex] = slotInfo
+			}
+			slotInfo.Committees[committeeIndex] = &CommitteeInfo{
+				Index:     committeeIndex,
+				Positions: rpValidators,
+			}
+		}
+	}
+
+	return nil
+
+}
+
+// Maps all minipools to their validator indices and creates a map of indices to minipool info
+func (r *treeGeneratorImpl_v9_v10) createMinipoolIndexMap() error {
+
+	// Get the status for all uncached minipool validators and add them to the cache
+	r.validatorIndexMap = map[string]*MinipoolInfo{}
+	for _, details := range r.nodeDetails {
+		if details.IsEligible {
+			for _, 
minipoolInfo := range details.Minipools { + status, exists := r.networkState.ValidatorDetails[minipoolInfo.ValidatorPubkey] + if !exists { + // Remove minipools that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: minipool %s (pubkey %s) didn't exist at this slot; removing it", minipoolInfo.Address.Hex(), minipoolInfo.ValidatorPubkey.Hex()) + minipoolInfo.WasActive = false + } else { + switch status.Status { + case beacon.ValidatorState_PendingInitialized, beacon.ValidatorState_PendingQueued: + // Remove minipools that don't have indices yet since they're not actually viable + //r.log.Printlnf("NOTE: minipool %s (index %s, pubkey %s) was in state %s; removing it", minipoolInfo.Address.Hex(), status.Index, minipoolInfo.ValidatorPubkey.Hex(), string(status.Status)) + minipoolInfo.WasActive = false + default: + // Get the validator index + minipoolInfo.ValidatorIndex = status.Index + r.validatorIndexMap[minipoolInfo.ValidatorIndex] = minipoolInfo + + // Get the validator's activation start and end slots + startSlot := status.ActivationEpoch * r.beaconConfig.SlotsPerEpoch + endSlot := status.ExitEpoch * r.beaconConfig.SlotsPerEpoch + + // Verify this minipool has already started + if status.ActivationEpoch == FarEpoch { + //r.log.Printlnf("NOTE: minipool %s hasn't been scheduled for activation yet; removing it", minipoolInfo.Address.Hex()) + minipoolInfo.WasActive = false + continue + } else if startSlot > r.rewardsFile.ConsensusEndBlock { + //r.log.Printlnf("NOTE: minipool %s activates on slot %d which is after interval end %d; removing it", minipoolInfo.Address.Hex(), startSlot, r.rewardsFile.ConsensusEndBlock) + minipoolInfo.WasActive = false + } + + // Check if the minipool exited before this interval + if status.ExitEpoch != FarEpoch && endSlot < r.rewardsFile.ConsensusStartBlock { + //r.log.Printlnf("NOTE: minipool %s exited on slot %d which was before interval start %d; removing it", minipoolInfo.Address.Hex(), endSlot, r.rewardsFile.ConsensusStartBlock) + minipoolInfo.WasActive = false + continue + } + } + } + } + } + } + + return nil + +} + +var farFutureTimestamp int64 = 1000000000000000000 // Far into the future +var farPastTimestamp int64 = 0 + +// Get the details for every node that was opted into the Smoothing Pool for at least some portion of this interval +func (r *treeGeneratorImpl_v9_v10) getSmoothingPoolNodeDetails() error { + + // For each NO, get their opt-in status and time of last change in batches + r.log.Printlnf("%s Getting details of nodes for Smoothing Pool calculation...", r.logPrefix) + nodeCount := uint64(len(r.networkState.NodeDetails)) + r.nodeDetails = make([]*NodeSmoothingDetails, nodeCount) + for batchStartIndex := uint64(0); batchStartIndex < nodeCount; batchStartIndex += SmoothingPoolDetailsBatchSize { + + // Get batch start & end index + iterationStartIndex := batchStartIndex + iterationEndIndex := batchStartIndex + SmoothingPoolDetailsBatchSize + if iterationEndIndex > nodeCount { + iterationEndIndex = nodeCount + } + + // Load details + var wg errgroup.Group + for iterationIndex := iterationStartIndex; iterationIndex < iterationEndIndex; iterationIndex++ { + iterationIndex := iterationIndex + wg.Go(func() error { + nativeNodeDetails := r.networkState.NodeDetails[iterationIndex] + nodeDetails := &NodeSmoothingDetails{ + Address: nativeNodeDetails.NodeAddress, + Minipools: []*MinipoolInfo{}, + SmoothingPoolEth: big.NewInt(0), + BonusEth: big.NewInt(0), + RewardsNetwork: nativeNodeDetails.RewardNetwork.Uint64(), + 
RplStake: nativeNodeDetails.RplStake, + } + + nodeDetails.IsOptedIn = nativeNodeDetails.SmoothingPoolRegistrationState + statusChangeTimeBig := nativeNodeDetails.SmoothingPoolRegistrationChanged + statusChangeTime := time.Unix(statusChangeTimeBig.Int64(), 0) + + if nodeDetails.IsOptedIn { + nodeDetails.OptInTime = statusChangeTime + nodeDetails.OptOutTime = time.Unix(farFutureTimestamp, 0) + } else { + nodeDetails.OptOutTime = statusChangeTime + nodeDetails.OptInTime = time.Unix(farPastTimestamp, 0) + } + + // Get the details for each minipool in the node + for _, mpd := range r.networkState.MinipoolDetailsByNode[nodeDetails.Address] { + if mpd.Exists && mpd.Status == rptypes.Staking { + nativeMinipoolDetails := r.networkState.MinipoolDetailsByAddress[mpd.MinipoolAddress] + penaltyCount := nativeMinipoolDetails.PenaltyCount.Uint64() + if penaltyCount >= 3 { + // This node is a cheater + nodeDetails.IsEligible = false + nodeDetails.Minipools = []*MinipoolInfo{} + r.nodeDetails[iterationIndex] = nodeDetails + return nil + } + + // This minipool is below the penalty count, so include it + nodeDetails.Minipools = append(nodeDetails.Minipools, &MinipoolInfo{ + Address: mpd.MinipoolAddress, + ValidatorPubkey: mpd.Pubkey, + NodeAddress: nodeDetails.Address, + NodeIndex: iterationIndex, + Fee: nativeMinipoolDetails.NodeFee, + //MissedAttestations: 0, + //GoodAttestations: 0, + MissingAttestationSlots: map[uint64]bool{}, + CompletedAttestations: map[uint64]bool{}, + WasActive: true, + AttestationScore: NewQuotedBigInt(0), + NodeOperatorBond: nativeMinipoolDetails.NodeDepositBalance, + }) + } + } + + nodeDetails.IsEligible = len(nodeDetails.Minipools) > 0 + r.nodeDetails[iterationIndex] = nodeDetails + return nil + }) + } + if err := wg.Wait(); err != nil { + return err + } + } + + // Populate the eligible borrowed ETH field for all nodes + for _, nodeDetails := range r.nodeDetails { + nnd := r.networkState.NodeDetailsByAddress[nodeDetails.Address] + nodeDetails.EligibleBorrowedEth = r.networkState.GetEligibleBorrowedEth(nnd) + } + + return nil + +} + +// Validates that the provided network is legal +func (r *treeGeneratorImpl_v9_v10) validateNetwork(network uint64) (bool, error) { + valid, exists := r.validNetworkCache[network] + if !exists { + var err error + valid, err = r.rp.GetNetworkEnabled(big.NewInt(int64(network)), r.opts) + if err != nil { + return false, err + } + r.validNetworkCache[network] = valid + } + + return valid, nil +} + +// Gets the start blocks for the given interval +func (r *treeGeneratorImpl_v9_v10) getBlocksAndTimesForInterval(previousIntervalEvent rewards.RewardsEvent) (*types.Header, error) { + // Sanity check to confirm the BN can access the block from the previous interval + _, exists, err := r.bc.GetBeaconBlock(previousIntervalEvent.ConsensusBlock.String()) + if err != nil { + return nil, fmt.Errorf("error verifying block from previous interval: %w", err) + } + if !exists { + return nil, fmt.Errorf("couldn't retrieve CL block from previous interval (slot %d); this likely means you checkpoint sync'd your Beacon Node and it has not backfilled to the previous interval yet so it cannot be used for tree generation", previousIntervalEvent.ConsensusBlock.Uint64()) + } + + previousEpoch := previousIntervalEvent.ConsensusBlock.Uint64() / r.beaconConfig.SlotsPerEpoch + nextEpoch := previousEpoch + 1 + + consensusStartSlot := nextEpoch * r.beaconConfig.SlotsPerEpoch + startTime := r.beaconConfig.GetSlotTime(consensusStartSlot) + endTime := 
r.beaconConfig.GetSlotTime(r.snapshotEnd.Slot) + + r.rewardsFile.StartTime = startTime + r.minipoolPerformanceFile.StartTime = startTime + + r.rewardsFile.EndTime = endTime + r.minipoolPerformanceFile.EndTime = endTime + + r.rewardsFile.ConsensusStartBlock = nextEpoch * r.beaconConfig.SlotsPerEpoch + r.minipoolPerformanceFile.ConsensusStartBlock = r.rewardsFile.ConsensusStartBlock + + r.rewardsFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + r.minipoolPerformanceFile.ConsensusEndBlock = r.snapshotEnd.ConsensusBlock + + r.rewardsFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + r.minipoolPerformanceFile.ExecutionEndBlock = r.snapshotEnd.ExecutionBlock + + // Get the first block that isn't missing + var elBlockNumber uint64 + for { + beaconBlock, exists, err := r.bc.GetBeaconBlock(fmt.Sprint(r.rewardsFile.ConsensusStartBlock)) + if err != nil { + return nil, fmt.Errorf("error getting EL data for BC slot %d: %w", r.rewardsFile.ConsensusStartBlock, err) + } + if !exists { + r.rewardsFile.ConsensusStartBlock++ + r.minipoolPerformanceFile.ConsensusStartBlock++ + } else { + elBlockNumber = beaconBlock.ExecutionBlockNumber + break + } + } + + var startElHeader *types.Header + if elBlockNumber == 0 { + // We are pre-merge, so get the first block after the one from the previous interval + r.rewardsFile.ExecutionStartBlock = previousIntervalEvent.ExecutionBlock.Uint64() + 1 + r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(r.rewardsFile.ExecutionStartBlock))) + if err != nil { + return nil, fmt.Errorf("error getting EL start block %d: %w", r.rewardsFile.ExecutionStartBlock, err) + } + } else { + // We are post-merge, so get the EL block corresponding to the BC block + r.rewardsFile.ExecutionStartBlock = elBlockNumber + r.minipoolPerformanceFile.ExecutionStartBlock = r.rewardsFile.ExecutionStartBlock + startElHeader, err = r.rp.HeaderByNumber(context.Background(), big.NewInt(int64(elBlockNumber))) + if err != nil { + return nil, fmt.Errorf("error getting EL header for block %d: %w", elBlockNumber, err) + } + } + + return startElHeader, nil +} + +func (r *treeGeneratorImpl_v9_v10) saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return saveRewardsArtifacts(smartnode, treeResult, nodeTrusted) +} diff --git a/shared/services/rewards/generator-v8_test.go b/shared/services/rewards/generator-v8_test.go new file mode 100644 index 000000000..5dd52d226 --- /dev/null +++ b/shared/services/rewards/generator-v8_test.go @@ -0,0 +1,153 @@ +package rewards + +import ( + "fmt" + "math/big" + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/services/state" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +type v8Test struct { + *testing.T + rp *test.MockRocketPool + bc *test.MockBeaconClient +} + +func (t *v8Test) saveArtifacts(prefix string, result *GenerateTreeResult) { + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("artifacts-%s", t.Name())) + t.failIf(err) + rewardsLocalFile := LocalFile[IRewardsFile]{ + fullPath: filepath.Join(tmpDir, 
fmt.Sprintf("%s-rewards.json", prefix)), + f: result.RewardsFile, + } + performanceLocalFile := LocalFile[IMinipoolPerformanceFile]{ + fullPath: filepath.Join(tmpDir, fmt.Sprintf("%s-minipool-performance.json", prefix)), + f: result.MinipoolPerformanceFile, + } + _, err = rewardsLocalFile.Write() + t.failIf(err) + _, err = performanceLocalFile.Write() + t.failIf(err) + + t.Logf("wrote artifacts to %s\n", tmpDir) +} + +func newV8Test(t *testing.T, index uint64) *v8Test { + rp := test.NewMockRocketPool(t, index) + out := &v8Test{ + T: t, + rp: rp, + bc: test.NewMockBeaconClient(t), + } + return out +} + +func (t *v8Test) failIf(err error) { + if err != nil { + t.Fatalf(err.Error()) + } +} + +func (t *v8Test) SetMinipoolPerformance(canonicalMinipoolPerformance IMinipoolPerformanceFile, networkState *state.NetworkState) { + addresses := canonicalMinipoolPerformance.GetMinipoolAddresses() + for _, address := range addresses { + + // Get the minipool's performance + perf, ok := canonicalMinipoolPerformance.GetSmoothingPoolPerformance(address) + if !ok { + t.Fatalf("Minipool %s not found in canonical minipool performance, despite being listed as present", address.Hex()) + } + missedSlots := perf.GetMissingAttestationSlots() + pubkey, err := perf.GetPubkey() + + // Get the minipool's validator index + validatorStatus := networkState.ValidatorDetails[pubkey] + + if err != nil { + t.Fatalf("Minipool %s pubkey could not be parsed: %s", address.Hex(), err.Error()) + } + t.bc.SetMinipoolPerformance(validatorStatus.Index, missedSlots) + } +} + +// TestV8Mainnet builds a tree using serialized state for a mainnet interval that used v8 +// and checks that the resulting artifacts match their canonical values. +func TestV8Mainnet(tt *testing.T) { + state := assets.GetMainnet20RewardsState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + canonical, err := DeserializeRewardsFile(assets.GetMainnet20RewardsJSON()) + t.failIf(err) + + canonicalPerformance, err := DeserializeMinipoolPerformanceFile(assets.GetMainnet20MinipoolPerformanceJSON()) + t.failIf(err) + + t.Logf("pending rpl rewards: %s", state.NetworkDetails.PendingRPLRewards.String()) + + t.bc.SetState(state) + + // Some interval info needed for mocks + consensusStartBlock := canonical.GetConsensusStartBlock() + executionStartBlock := canonical.GetExecutionStartBlock() + consensusEndBlock := canonical.GetConsensusEndBlock() + + // Create a new treeGeneratorImpl_v8 + logger := log.NewColorLogger(color.Faint) + generator := newTreeGeneratorImpl_v8( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + canonical.GetStartTime(), + canonical.GetEndTime(), + consensusEndBlock, + &types.Header{ + Number: big.NewInt(int64(canonical.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + canonical.GetIntervalsPassed(), + state, + ) + + // Load the mock up + t.rp.SetRewardSnapshotEvent(assets.GetRewardSnapshotEventInterval19()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(canonical.GetStartTime().Unix())}) + + // Set the critical duties slots + t.bc.SetCriticalDutiesSlots(assets.GetMainnet20CriticalDutiesSlots()) + + // Set the minipool performance + t.SetMinipoolPerformance(canonicalPerformance, state) + + artifacts, err := 
generator.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + // Save the artifacts if verbose mode is enabled + if testing.Verbose() { + t.saveArtifacts("", artifacts) + } + + t.Logf("merkle root: %s\n", artifacts.RewardsFile.GetMerkleRoot()) + if artifacts.RewardsFile.GetMerkleRoot() != canonical.GetMerkleRoot() { + t.Fatalf("Merkle root does not match %s", canonical.GetMerkleRoot()) + } else { + t.Logf("merkle root matches %s", canonical.GetMerkleRoot()) + } +} diff --git a/shared/services/rewards/generator.go b/shared/services/rewards/generator.go index cdcc9d8a3..e0ba76630 100644 --- a/shared/services/rewards/generator.go +++ b/shared/services/rewards/generator.go @@ -6,8 +6,9 @@ import ( "slices" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/ipfs/go-cid" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" "github.com/rocket-pool/smartnode/shared/services/state" @@ -46,38 +47,85 @@ const ( // HoleskyV7Interval uint64 = 0 // Mainnet intervals - MainnetV8Interval uint64 = 18 - + MainnetV8Interval uint64 = 18 + MainnetV9Interval uint64 = 29 + MainnetV10Interval uint64 = 30 // Devnet intervals // Holesky intervals - HoleskyV8Interval uint64 = 93 + HoleskyV8Interval uint64 = 93 + HoleskyV9Interval uint64 = 276 + HoleskyV10Interval uint64 = 277 ) +func GetMainnetRulesetVersion(interval uint64) uint64 { + if interval >= MainnetV10Interval { + return 10 + } + if interval >= MainnetV9Interval { + return 9 + } + return 8 +} + +func GetHoleskyRulesetVersion(interval uint64) uint64 { + if interval >= HoleskyV10Interval { + return 10 + } + if interval >= HoleskyV9Interval { + return 9 + } + return 8 +} + +func GetRulesetVersion(network cfgtypes.Network, interval uint64) uint64 { + switch network { + case cfgtypes.Network_Mainnet: + return GetMainnetRulesetVersion(interval) + case cfgtypes.Network_Holesky: + return GetHoleskyRulesetVersion(interval) + case cfgtypes.Network_Devnet: + return 10 + default: + return 10 + } +} + type TreeGenerator struct { rewardsIntervalInfos map[uint64]rewardsIntervalInfo logger *log.ColorLogger logPrefix string - rp *rocketpool.RocketPool + rp RewardsExecutionClient cfg *config.RocketPoolConfig bc beacon.Client index uint64 startTime time.Time endTime time.Time - consensusBlock uint64 + snapshotEnd *SnapshotEnd elSnapshotHeader *types.Header intervalsPassed uint64 generatorImpl treeGeneratorImpl approximatorImpl treeGeneratorImpl } +type SnapshotEnd struct { + // Slot is the last slot of the interval + Slot uint64 + // ConsensusBlock is the last non-missed slot of the interval + ConsensusBlock uint64 + // ExecutionBlock is the EL block number of ConsensusBlock + ExecutionBlock uint64 +} + type treeGeneratorImpl interface { - generateTree(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (IRewardsFile, error) - approximateStakerShareOfSmoothingPool(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client) (*big.Int, error) + generateTree(rp RewardsExecutionClient, networkName string, previousRewardsPoolAddresses []common.Address, bc RewardsBeaconClient) (*GenerateTreeResult, error) + approximateStakerShareOfSmoothingPool(rp RewardsExecutionClient, networkName string, bc RewardsBeaconClient) (*big.Int, error) getRulesetVersion() uint64 + // Returns the primary artifact cid for consensus, all cids of all files 
in a map, and any potential errors + saveFiles(smartnode *config.SmartnodeConfig, treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) } -func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, bc beacon.Client, index uint64, startTime time.Time, endTime time.Time, consensusBlock uint64, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState, rollingRecord *RollingRecord) (*TreeGenerator, error) { +func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp RewardsExecutionClient, cfg *config.RocketPoolConfig, bc beacon.Client, index uint64, startTime time.Time, endTime time.Time, snapshotEnd *SnapshotEnd, elSnapshotHeader *types.Header, intervalsPassed uint64, state *state.NetworkState) (*TreeGenerator, error) { t := &TreeGenerator{ logger: logger, logPrefix: logPrefix, @@ -87,21 +135,34 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp *rocketpool. index: index, startTime: startTime, endTime: endTime, - consensusBlock: consensusBlock, + snapshotEnd: snapshotEnd, elSnapshotHeader: elSnapshotHeader, intervalsPassed: intervalsPassed, } + // v10 + v10_generator := newTreeGeneratorImpl_v9_v10(10, t.logger, t.logPrefix, t.index, t.snapshotEnd, t.elSnapshotHeader, t.intervalsPassed, state) + + // v9 + v9_generator := newTreeGeneratorImpl_v9_v10(9, t.logger, t.logPrefix, t.index, t.snapshotEnd, t.elSnapshotHeader, t.intervalsPassed, state) + // v8 - var v8_generator treeGeneratorImpl - if rollingRecord == nil { - v8_generator = newTreeGeneratorImpl_v8(t.logger, t.logPrefix, t.index, t.startTime, t.endTime, t.consensusBlock, t.elSnapshotHeader, t.intervalsPassed, state) - } else { - v8_generator = newTreeGeneratorImpl_v8_rolling(t.logger, t.logPrefix, t.index, t.startTime, t.endTime, t.consensusBlock, t.elSnapshotHeader, t.intervalsPassed, state, rollingRecord) - } + v8_generator := newTreeGeneratorImpl_v8(t.logger, t.logPrefix, t.index, t.startTime, t.endTime, t.snapshotEnd.ConsensusBlock, t.elSnapshotHeader, t.intervalsPassed, state) // Create the interval wrappers rewardsIntervalInfos := []rewardsIntervalInfo{ + { + rewardsRulesetVersion: 10, + mainnetStartInterval: MainnetV10Interval, + holeskyStartInterval: HoleskyV10Interval, + generator: v10_generator, + }, + { + rewardsRulesetVersion: 9, + mainnetStartInterval: MainnetV9Interval, + holeskyStartInterval: HoleskyV9Interval, + generator: v9_generator, + }, { rewardsRulesetVersion: 8, mainnetStartInterval: MainnetV8Interval, @@ -167,12 +228,18 @@ func NewTreeGenerator(logger *log.ColorLogger, logPrefix string, rp *rocketpool. 
return t, nil } -func (t *TreeGenerator) GenerateTree() (IRewardsFile, error) { - return t.generatorImpl.generateTree(t.rp, t.cfg, t.bc) +type GenerateTreeResult struct { + RewardsFile IRewardsFile + MinipoolPerformanceFile IMinipoolPerformanceFile + InvalidNetworkNodes map[common.Address]uint64 +} + +func (t *TreeGenerator) GenerateTree() (*GenerateTreeResult, error) { + return t.generatorImpl.generateTree(t.rp, fmt.Sprint(t.cfg.Smartnode.Network.Value), t.cfg.Smartnode.GetPreviousRewardsPoolAddresses(), t.bc) } func (t *TreeGenerator) ApproximateStakerShareOfSmoothingPool() (*big.Int, error) { - return t.approximatorImpl.approximateStakerShareOfSmoothingPool(t.rp, t.cfg, t.bc) + return t.approximatorImpl.approximateStakerShareOfSmoothingPool(t.rp, fmt.Sprint(t.cfg.Smartnode.Network.Value), t.bc) } func (t *TreeGenerator) GetGeneratorRulesetVersion() uint64 { @@ -183,13 +250,18 @@ func (t *TreeGenerator) GetApproximatorRulesetVersion() uint64 { return t.approximatorImpl.getRulesetVersion() } -func (t *TreeGenerator) GenerateTreeWithRuleset(ruleset uint64) (IRewardsFile, error) { +func (t *TreeGenerator) GenerateTreeWithRuleset(ruleset uint64) (*GenerateTreeResult, error) { info, exists := t.rewardsIntervalInfos[ruleset] if !exists { return nil, fmt.Errorf("ruleset v%d does not exist", ruleset) } - return info.generator.generateTree(t.rp, t.cfg, t.bc) + return info.generator.generateTree( + t.rp, + fmt.Sprint(t.cfg.Smartnode.Network.Value), + t.cfg.Smartnode.GetPreviousRewardsPoolAddresses(), + t.bc, + ) } func (t *TreeGenerator) ApproximateStakerShareOfSmoothingPoolWithRuleset(ruleset uint64) (*big.Int, error) { @@ -198,5 +270,9 @@ func (t *TreeGenerator) ApproximateStakerShareOfSmoothingPoolWithRuleset(ruleset return nil, fmt.Errorf("ruleset v%d does not exist", ruleset) } - return info.generator.approximateStakerShareOfSmoothingPool(t.rp, t.cfg, t.bc) + return info.generator.approximateStakerShareOfSmoothingPool(t.rp, fmt.Sprint(t.cfg.Smartnode.Network.Value), t.bc) +} + +func (t *TreeGenerator) SaveFiles(treeResult *GenerateTreeResult, nodeTrusted bool) (cid.Cid, map[string]cid.Cid, error) { + return t.generatorImpl.saveFiles(t.cfg.Smartnode, treeResult, nodeTrusted) } diff --git a/shared/services/rewards/mock_test.go b/shared/services/rewards/mock_test.go new file mode 100644 index 000000000..e24d5739d --- /dev/null +++ b/shared/services/rewards/mock_test.go @@ -0,0 +1,317 @@ +package rewards + +// This file contains treegen tests which use mock history. +// These mocks are faster to process than real history, and are useful for +// testing new features and refactoring. 
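+// The v10 ruleset variants of these tests live in mock_v10_test.go.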
+ +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +func TestMockIntervalDefaultsTreegenv8v9(tt *testing.T) { + history := test.NewDefaultMockHistory() + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + generator := newTreeGeneratorImpl_v8( + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + history.GetStartTime(), + history.GetEndTime(), + consensusEndBlock, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + v8Artifacts, err := generator.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v8", v8Artifacts) + } + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 9, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v9Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v9", v9Artifacts) + } + + // Validate individual node details in the rewards file + rewardsFile := v8Artifacts.RewardsFile + nodeSummary := history.GetNodeSummary() + + singleEightEthNodes := nodeSummary["single_eight_eth"] + singleSixteenEthNodes := nodeSummary["single_sixteen_eth"] + for _, node := range append(singleEightEthNodes, singleSixteenEthNodes...) 
{ + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got 0 ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + singleEightEthNodesSP := nodeSummary["single_eight_eth_sp"] + singleSixteenEthNodesSP := nodeSummary["single_sixteen_eth_sp"] + for _, node := range append(singleEightEthNodesSP, singleSixteenEthNodesSP...) { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.SmoothingPoolRegistrationState { + if node.Class == "single_eight_eth_sp" { + expectedEthAmount.SetString("1354725546842756912", 10) + } else { + // 16-eth minipools earn more eth! A bit less than double. 
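+				// (With score = fee + (bond/32)*(1 - fee), doubling the bond from 8 to
+				// 16 ETH does not double the score, since the fee term is unchanged.)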
+ expectedEthAmount.SetString("2292612463887742467", 10) + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingInNodesSP := append( + nodeSummary["single_eight_eth_opted_in_quarter"], + nodeSummary["single_sixteen_eth_opted_in_quarter"]..., + ) + for _, node := range optingInNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1019397441188609162", 10) + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("1725134131242261659", 10) + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingOutNodesSP := append( + nodeSummary["single_eight_eth_opted_out_three_quarters"], + nodeSummary["single_sixteen_eth_opted_out_three_quarters"]..., + ) + for _, node := range optingOutNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1005984316962443252", 10) + } else { + // 16-eth minipools earn more 
+			expectedEthAmount.SetString("1702434997936442426", 10)
+		}
+		if ethAmount.Cmp(expectedEthAmount) != 0 {
+			t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String())
+		}
+
+		// Make sure it got 0 oDAO rpl
+		oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address)
+		if oDaoRplAmount.Sign() != 0 {
+			t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0")
+		}
+	}
+
+	bondReductionNode := nodeSummary["single_bond_reduction"]
+	for _, node := range bondReductionNode {
+		// Check the rewards amount in the rewards file
+		rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address)
+
+		// Nodes that bond reduce are treated as having their new bond for the full interval,
+		// when it comes to RPL rewards.
+		expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10)
+
+		if rewardsAmount.Cmp(expectedRewardsAmount) != 0 {
+			t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String())
+		}
+
+		// Make sure it got reduced ETH
+		ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address)
+		expectedEthAmount, _ := big.NewInt(0).SetString("1922203879488237721", 10)
+		if ethAmount.Cmp(expectedEthAmount) != 0 {
+			t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String())
+		}
+
+		// Make sure it got 0 oDAO rpl
+		oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address)
+		if oDaoRplAmount.Sign() != 0 {
+			t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0")
+		}
+	}
+
+	noMinipoolsNodes := nodeSummary["no_minipools"]
+	for _, node := range noMinipoolsNodes {
+		// Check the rewards amount in the rewards file
+		rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address)
+		if rewardsAmount.Sign() != 0 {
+			t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0")
+		}
+
+		// Make sure it got 0 ETH
+		ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address)
+		if ethAmount.Sign() != 0 {
+			t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0")
+		}
+
+		// Make sure it got 0 oDAO rpl
+		oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address)
+		if oDaoRplAmount.Sign() != 0 {
+			t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0")
+		}
+	}
+
+	// Validate merkle root
+	v8MerkleRoot := v8Artifacts.RewardsFile.GetMerkleRoot()
+	v9MerkleRoot := v9Artifacts.RewardsFile.GetMerkleRoot()
+
+	if !strings.EqualFold(v8MerkleRoot, v9MerkleRoot) {
+		t.Fatalf("Merkle root does not match %s != %s", v8MerkleRoot, v9MerkleRoot)
+	} else {
+		t.Logf("v8/v9 Merkle root matches %s", v8MerkleRoot)
+	}
+
+	// Expected merkle root:
+	// 0x9915d949936995f9045d26c3ef919194445377e83f1be2da47d181ee9ce705d8
+	//
+	// If this does not match, it implies either you updated the set of default mock nodes,
+	// or you introduced a regression in treegen.
+	// DO NOT update this value unless you know what you are doing.
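+	// (If this check fails, rerun with `go test -v`; saveArtifacts then writes the
+	// generated artifacts to a temporary directory for inspection.)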
+ expectedMerkleRoot := "0x9915d949936995f9045d26c3ef919194445377e83f1be2da47d181ee9ce705d8" + if !strings.EqualFold(v8MerkleRoot, expectedMerkleRoot) { + t.Fatalf("Merkle root does not match expected value %s != %s", v8MerkleRoot, expectedMerkleRoot) + } else { + t.Logf("Merkle root matches expected value %s", expectedMerkleRoot) + } +} diff --git a/shared/services/rewards/mock_v10_test.go b/shared/services/rewards/mock_v10_test.go new file mode 100644 index 000000000..ce933590b --- /dev/null +++ b/shared/services/rewards/mock_v10_test.go @@ -0,0 +1,864 @@ +package rewards + +// This file contains treegen tests which use mock history. +// These mocks are faster to process than real history, and are useful for +// testing new features and refactoring. + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/fatih/color" + "github.com/rocket-pool/rocketpool-go/utils/eth" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/test" + "github.com/rocket-pool/smartnode/shared/services/rewards/test/assets" + "github.com/rocket-pool/smartnode/shared/utils/log" +) + +func TestMockIntervalDefaultsTreegenv10(tt *testing.T) { + + history := test.NewDefaultMockHistory() + // Add a node which is earning some bonus commission + node := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 5, + }) + node.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, node) + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + // Set some custom balances for the validators that opt in and out of smoothing pool + nodeSummary := history.GetNodeSummary() + customBalanceNodes := nodeSummary["single_eight_eth_opted_in_quarter"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) + } + customBalanceNodes = nodeSummary["single_eight_eth_opted_out_three_quarters"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.75) + } + customBalanceNodes = nodeSummary["single_bond_reduction"] + for _, node := range customBalanceNodes { + node.Minipools[0].SPWithdrawals = eth.EthToWei(0.5) + } + + history.SetWithdrawals(t.bc) + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name()+"-stateless", + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: 
executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Validate individual node details in the rewards file + rewardsFile := v10Artifacts.RewardsFile + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + + singleEightEthNodes := nodeSummary["single_eight_eth"] + singleSixteenEthNodes := nodeSummary["single_sixteen_eth"] + for _, node := range append(singleEightEthNodes, singleSixteenEthNodes...) { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got 0 ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Logf("Node %+v", node) + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + singleEightEthNodesSP := nodeSummary["single_eight_eth_sp"] + singleSixteenEthNodesSP := nodeSummary["single_sixteen_eth_sp"] + for _, node := range append(singleEightEthNodesSP, singleSixteenEthNodesSP...) { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.SmoothingPoolRegistrationState { + if node.Class == "single_eight_eth_sp" { + expectedEthAmount.SetString("1450562599049128367", 10) + // There should be a bonus for these nodes' minipools + if len(node.Minipools) != 1 { + t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) + } + minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + // 8 eth minipools with 10% collateral earn 14% commission overall. + // They earned 10% on 24/32 of the 1 eth of consensus rewards already, which is 0.075 eth. + // Their bonus is therefore 4/10 of 0.075 eth, which is 0.03 eth. 
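+ // Illustrative arithmetic for the expected bonus below (derived from the comment above,
+ // not part of the generator logic): commission already paid on the borrowed 24/32 of the
+ // 1 eth consensus reward is 0.075 eth at 10%; the bonus is the extra 4%, i.e.
+ // (14% - 10%) * 0.75 eth = 0.03 eth = 30000000000000000 wei.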
+ expectedBonusEthEarned, _ := big.NewInt(0).SetString("30000000000000000", 10) + if minipoolPerf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(14e16)) + expectedAttestationScore.Mul(expectedAttestationScore, eightEth) + expectedAttestationScore.Div(expectedAttestationScore, thirtyTwoEth) + expectedAttestationScore.Add(expectedAttestationScore, big.NewInt(14e16)) + expectedAttestationScore.Mul(expectedAttestationScore, big.NewInt(101)) // there are 101 epochs in the interval + if minipoolPerf.GetAttestationScore().Cmp(expectedAttestationScore) != 0 { + t.Fatalf("Minipool %s attestation score does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetAttestationScore().String(), expectedAttestationScore.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("2200871632329635499", 10) + if len(node.Minipools) != 1 { + t.Fatalf("Expected 1 minipool for node %s, got %d", node.Notes, len(node.Minipools)) + } + minipoolPerf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(node.Minipools[0].Address) + // The 16 eth minipools earn 10% on 24/32. + expectedAttestationScore := big.NewInt(0).Sub(oneEth, big.NewInt(1e17)) + expectedAttestationScore.Mul(expectedAttestationScore, sixteenEth) + expectedAttestationScore.Div(expectedAttestationScore, thirtyTwoEth) + expectedAttestationScore.Add(expectedAttestationScore, big.NewInt(1e17)) + expectedAttestationScore.Mul(expectedAttestationScore, big.NewInt(101)) // there are 101 epochs in the interval + if minipoolPerf.GetAttestationScore().Cmp(expectedAttestationScore) != 0 { + t.Fatalf("Minipool %s attestation score does not match expected value: %s != %s", node.Minipools[0].Address.Hex(), minipoolPerf.GetAttestationScore().String(), expectedAttestationScore.String()) + } + // 16 eth minipools earn no bonus. 
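+ // Illustrative: both expected scores above follow the node's per-epoch share of one
+ // attestation unit, (1 - fee) * bond/32 + fee, i.e. 0.355e18 for the 8-eth pool at its
+ // 14% effective commission and 0.55e18 for the 16-eth pool at 10%. The zero check
+ // below is the flip side: only the 8-eth pool earns bonus commission here.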
+ if minipoolPerf.GetBonusEthEarned().Sign() != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != 0", node.Minipools[0].Address.Hex(), minipoolPerf.GetBonusEthEarned().String()) + } + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingInNodesSP := append( + nodeSummary["single_eight_eth_opted_in_quarter"], + nodeSummary["single_sixteen_eth_opted_in_quarter"]..., + ) + for _, node := range optingInNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Node has 20 RPL and only 1 8-eth minipool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + if perf.GetBonusEthEarned().Sign() != 0 { + // 16 eth minipools should not get bonus commission + t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) + } + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_in_quarter" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1091438193343898573", 10) + // Earns 3/4 the bonus of a node that was in for the whole interval + expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double.
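+ // Illustrative ratio (assuming the per-epoch node shares sketched earlier): the 16-eth
+ // pool keeps 16/32 + 10% * 16/32 = 0.55 of its rewards vs 8/32 + 14% * 24/32 = 0.355
+ // for the 8-eth pool, roughly a 1.5x difference - "a bit less than double".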
+ expectedEthAmount.SetString("1656101426307448494", 10) + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + optingOutNodesSP := append( + nodeSummary["single_eight_eth_opted_out_three_quarters"], + nodeSummary["single_sixteen_eth_opted_out_three_quarters"]..., + ) + for _, node := range optingOutNodesSP { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Node has 20 RPL and only 1 8-eth minpool which puts it above the linear curve + expectedRewardsAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + expectedRewardsAmount.SetString("1784353229014464268647", 10) + } else { + // 16-eth minipools earn less for the same RPL stake, due to RPIP-30 + expectedRewardsAmount.SetString("1310160289473732090952", 10) + } + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount := big.NewInt(0) + if node.Class == "single_eight_eth_opted_out_three_quarters" { + // About 3/4 what the full nodes got + expectedEthAmount.SetString("1077373217115689381", 10) + // Earns 3/4 the bonus of a node that was in for the whole interval + expectedBonusEthEarned, _ := big.NewInt(0).SetString("22500000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + } else { + // 16-eth minipools earn more eth! A bit less than double. + expectedEthAmount.SetString("1634310618066561014", 10) + if perf.GetBonusEthEarned().Sign() != 0 { + // 16 eth minipools should not get bonus commission + t.Fatalf("Minipool %s shouldn't have earned bonus eth and did", mp.Address.Hex()) + } + } + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + bondReductionNode := nodeSummary["single_bond_reduction"] + for _, node := range bondReductionNode { + + mp := node.Minipools[0] + perf, _ := minipoolPerformanceFile.GetSmoothingPoolPerformance(mp.Address) + + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + + // Nodes that bond reduce are treated as having their new bond for the full interval, + // when it comes to RPL rewards. 
+ expectedRewardsAmount, _ := big.NewInt(0).SetString("1019308880071990649542", 10) + + if rewardsAmount.Cmp(expectedRewardsAmount) != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), expectedRewardsAmount.String()) + } + + // Make sure it got reduced ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + expectedEthAmount, _ := big.NewInt(0).SetString("1920903328050713153", 10) + if ethAmount.Cmp(expectedEthAmount) != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), expectedEthAmount.String()) + } + + // And a reduced bonus + expectedBonusEthEarned, _ := big.NewInt(0).SetString("15000000000000000", 10) + if perf.GetBonusEthEarned().Cmp(expectedBonusEthEarned) != 0 { + t.Fatalf("Minipool %s bonus does not match expected value: %s != %s", mp.Address.Hex(), perf.GetBonusEthEarned().String(), expectedBonusEthEarned.String()) + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + noMinipoolsNodes := nodeSummary["no_minipools"] + for _, node := range noMinipoolsNodes { + // Check the rewards amount in the rewards file + rewardsAmount := rewardsFile.GetNodeCollateralRpl(node.Address) + if rewardsAmount.Sign() != 0 { + t.Fatalf("Rewards amount does not match expected value for node %s: %s != %s", node.Notes, rewardsAmount.String(), "0") + } + + // Make sure it got 0 ETH + ethAmount := rewardsFile.GetNodeSmoothingPoolEth(node.Address) + if ethAmount.Sign() != 0 { + t.Fatalf("ETH amount does not match expected value for node %s: %s != %s", node.Notes, ethAmount.String(), "0") + } + + // Make sure it got 0 oDAO rpl + oDaoRplAmount := rewardsFile.GetNodeOracleDaoRpl(node.Address) + if oDaoRplAmount.Sign() != 0 { + t.Fatalf("oDAO rpl amount does not match expected value for node %s: %s != %s", node.Notes, oDaoRplAmount.String(), "0") + } + } + + // Validate merkle root + v10MerkleRoot := v10Artifacts.RewardsFile.GetMerkleRoot() + + // Expected merkle root: + // 0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b + // + // If this does not match, it implies either you updated the set of default mock nodes, + // or you introduced a regression in treegen. + // DO NOT update this value unless you know what you are doing.
+ expectedMerkleRoot := "0x176bba15231cb82edb5c34c8882af09dfb77a2ee31a96b623bffd8e48cedf18b" + if !strings.EqualFold(v10MerkleRoot, expectedMerkleRoot) { + t.Fatalf("Merkle root does not match expected value %s != %s", v10MerkleRoot, expectedMerkleRoot) + } else { + t.Logf("Merkle root matches expected value %s", expectedMerkleRoot) + } +} + +func TestInsufficientEthForBonuseses(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 5, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 20, + }) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + // Ovewrite the SP balance to a value under the bonus commission + history.NetworkDetails.SmoothingPoolBalance = big.NewInt(1000) + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + history.SetWithdrawals(t.bc) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + if ethOne.Uint64() != 169+416 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 169+416) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + if ethTwo.Uint64() != 177+237 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %d", ethTwo.String(), 177+237) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool 
performance not found") + } + if perfOne.GetBonusEthEarned().Uint64() != 416 { + t.Fatalf("Node one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 416) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 237 { + t.Fatalf("Node two bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 237) + } +} + +func TestMockNoRPLRewards(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: false, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 2, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + nodeTwo.Minipools[1].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Give all three minipools 1 ETH of consensus income + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[1].ValidatorIndex, big.NewInt(1e18)) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + 
ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + // Node one is not in the SP, so it should have 0 ETH + if ethOne.Uint64() != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %d", ethOne.String(), 0) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("32575000000000000000", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + _, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if ok { + t.Fatalf("Node one minipool performance should not be found") + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } + perfThree, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[1].Address) + if !ok { + t.Fatalf("Node two minipool two performance not found") + } + if perfThree.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool two bonus does not match expected value: %s != %d", perfThree.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that.
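+ // Illustrative: commissions are reported as 1e18-scaled fractions, so 5% base plus the
+ // 5% bonus is 0.10 * 1e18 = 100000000000000000; the 0.0375 eth bonus above is that
+ // extra 5% applied to the borrowed 24/32 of the pool's 1 eth of consensus income.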
+ if perfThree.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool two effective commission does not match expected value: %s != %d", perfThree.GetEffectiveCommission().String(), 100000000000000000) + } +} + +func TestMockOptedOutAndThenBondReduced(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: false, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + // Opted out 1/4 of the way through the interval + nodeOne.SmoothingPoolRegistrationChanged = history.BeaconConfig.GetSlotTime(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch + (history.EndEpoch-history.StartEpoch)/4)) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + // Bond reduced 1/2 of the way through the interval + nodeOne.Minipools[0].LastBondReductionTime = history.BeaconConfig.GetSlotTime(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch + (history.EndEpoch-history.StartEpoch)/2)) + nodeOne.Minipools[0].LastBondReductionPrevValue = big.NewInt(0).Set(sixteenEth) + nodeOne.Minipools[0].LastBondReductionPrevNodeFee, _ = big.NewInt(0).SetString("140000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) + + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Add withdrawals to both minipools + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + 
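+ // Expectation sketch (derived from the assertions below): node one left the smoothing
+ // pool a quarter of the way through and only bond reduced afterwards, so it should
+ // still collect some pro-rated SP ETH but no bonus; node two stays opted in and
+ // should earn the standard 0.0375 eth bonus.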
if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + // Node one was in the SP so it should have some ETH, but no bonuses + expectedEthOne, _ := big.NewInt(0).SetString("11309523809523809523", 10) + if ethOne.Cmp(expectedEthOne) != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("26089087301587301587", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance should be found") + } + if perfOne.GetBonusEthEarned().Uint64() != 0 { + t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 0) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + // Node two is in the SP and starts with 5% commission. It has no RPL staked, so it earns an extra 5% on top of that. + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } +} + +func TestMockWithdrawableEpoch(tt *testing.T) { + + history := test.NewDefaultMockHistoryNoNodes() + // Add two nodes which are earning some bonus commission + nodeOne := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeOne.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + history.Nodes = append(history.Nodes, nodeOne) + nodeTwo := history.GetNewDefaultMockNode(&test.NewMockNodeParams{ + SmoothingPool: true, + EightEthMinipools: 1, + CollateralRpl: 0, + }) + nodeTwo.Minipools[0].NodeFee, _ = big.NewInt(0).SetString("50000000000000000", 10) + // Withdrawable epoch half way through the interval + nodeTwo.Minipools[0].WithdrawableEpoch = history.StartEpoch + (history.EndEpoch-history.StartEpoch)/2 + history.Nodes = append(history.Nodes, nodeTwo) + + // Add oDAO nodes + odaoNodes := history.GetDefaultMockODAONodes() + history.Nodes = append(history.Nodes, odaoNodes...) 
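+ // Scenario sketch: node two's minipool becomes withdrawable halfway through the
+ // interval and withdraws 33 eth in total (0.5 + 32.5, configured below); 32 eth of
+ // that is the returned bond, so only 1 eth should count as consensus income.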
+ + state := history.GetEndNetworkState() + + t := newV8Test(tt, state.NetworkDetails.RewardIndex) + + t.bc.SetState(state) + + // Add withdrawals to both minipools + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + // Add a withdrawal in the epoch after the interval ends + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.EndEpoch+1), nodeOne.Minipools[0].ValidatorIndex, big.NewInt(1e18)) + // Withdraw 0.5 eth at the start of the interval + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.StartEpoch+1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(5e17)) + // Withdraw 32.5 eth at the end of the interval + t.bc.AddWithdrawal(history.BeaconConfig.FirstSlotOfEpoch(history.EndEpoch-1), nodeTwo.Minipools[0].ValidatorIndex, big.NewInt(0).Mul(big.NewInt(325), big.NewInt(1e17))) + + consensusStartBlock := history.GetConsensusStartBlock() + executionStartBlock := history.GetExecutionStartBlock() + consensusEndBlock := history.GetConsensusEndBlock() + executionEndBlock := history.GetExecutionEndBlock() + + logger := log.NewColorLogger(color.Faint) + + t.rp.SetRewardSnapshotEvent(history.GetPreviousRewardSnapshotEvent()) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock-1), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock - 1}) + t.bc.SetBeaconBlock(fmt.Sprint(consensusStartBlock), beacon.BeaconBlock{ExecutionBlockNumber: executionStartBlock}) + t.rp.SetHeaderByNumber(big.NewInt(int64(executionStartBlock)), &types.Header{Time: uint64(history.GetStartTime().Unix())}) + + for _, validator := range state.ValidatorDetails { + t.bc.SetMinipoolPerformance(validator.Index, make([]uint64, 0)) + } + + generatorv9v10 := newTreeGeneratorImpl_v9_v10( + 10, + &logger, + t.Name(), + state.NetworkDetails.RewardIndex, + &SnapshotEnd{ + Slot: consensusEndBlock, + ConsensusBlock: consensusEndBlock, + ExecutionBlock: executionEndBlock, + }, + &types.Header{ + Number: big.NewInt(int64(history.GetExecutionEndBlock())), + Time: assets.Mainnet20ELHeaderTime, + }, + /* intervalsPassed= */ 1, + state, + ) + + v10Artifacts, err := generatorv9v10.generateTree( + t.rp, + "mainnet", + make([]common.Address, 0), + t.bc, + ) + t.failIf(err) + + if testing.Verbose() { + t.saveArtifacts("v10", v10Artifacts) + } + + // Check the rewards file + rewardsFile := v10Artifacts.RewardsFile + ethOne := rewardsFile.GetNodeSmoothingPoolEth(nodeOne.Address) + expectedEthOne, _ := big.NewInt(0).SetString("21920833333333333333", 10) + if ethOne.Cmp(expectedEthOne) != 0 { + t.Fatalf("Node one ETH amount does not match expected value: %s != %s", ethOne.String(), expectedEthOne.String()) + } + ethTwo := rewardsFile.GetNodeSmoothingPoolEth(nodeTwo.Address) + expectedEthTwo, _ := big.NewInt(0).SetString("10654166666666666666", 10) + if ethTwo.Cmp(expectedEthTwo) != 0 { + t.Fatalf("Node two ETH amount does not match expected value: %s != %s", ethTwo.String(), expectedEthTwo.String()) + } + + // Check the minipool performance file + minipoolPerformanceFile := v10Artifacts.MinipoolPerformanceFile + perfOne, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeOne.Minipools[0].Address) + if !ok { + t.Fatalf("Node one minipool performance should be found") + } + if perfOne.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node one minipool one bonus does not match expected value: %s != %d", perfOne.GetBonusEthEarned().String(), 37500000000000000) + } + if perfOne.GetEffectiveCommission().Uint64() 
!= 100000000000000000 { + t.Fatalf("Node one minipool one effective commission does not match expected value: %s != %d", perfOne.GetEffectiveCommission().String(), 100000000000000000) + } + if perfOne.GetConsensusIncome().Uint64() != 1000000000000000000 { + t.Fatalf("Node one minipool one consensus income does not match expected value: %s != %d", perfOne.GetConsensusIncome().String(), 1000000000000000000) + } + perfTwo, ok := minipoolPerformanceFile.GetSmoothingPoolPerformance(nodeTwo.Minipools[0].Address) + if !ok { + t.Fatalf("Node two minipool one performance not found") + } + if perfTwo.GetBonusEthEarned().Uint64() != 37500000000000000 { + t.Fatalf("Node two minipool one bonus does not match expected value: %s != %d", perfTwo.GetBonusEthEarned().String(), 37500000000000000) + } + if perfTwo.GetEffectiveCommission().Uint64() != 100000000000000000 { + t.Fatalf("Node two minipool one effective commission does not match expected value: %s != %d", perfTwo.GetEffectiveCommission().String(), 100000000000000000) + } + if perfTwo.GetConsensusIncome().Uint64() != 1000000000000000000 { + t.Fatalf("Node two minipool one consensus income does not match expected value: %s != %d", perfTwo.GetConsensusIncome().String(), 1000000000000000000) + } +} diff --git a/shared/services/rewards/record-file-info.go b/shared/services/rewards/record-file-info.go deleted file mode 100644 index 815be6bd8..000000000 --- a/shared/services/rewards/record-file-info.go +++ /dev/null @@ -1,10 +0,0 @@ -package rewards - -// Information about a saved rolling record -type RecordFileInfo struct { - StartSlot uint64 `json:"startSlot"` - EndSlot uint64 `json:"endSlot"` - Filename string `json:"filename"` - Version int `json:"version"` - Checksum [48]byte `json:"checksum"` -} diff --git a/shared/services/rewards/rewards-file-v1.go b/shared/services/rewards/rewards-file-v1.go index 378a6a2ea..d0d0fbb0a 100644 --- a/shared/services/rewards/rewards-file-v1.go +++ b/shared/services/rewards/rewards-file-v1.go @@ -14,7 +14,6 @@ import ( "github.com/wealdtech/go-merkletree/keccak256" ) -// Holds information type MinipoolPerformanceFile_v1 struct { Index uint64 `json:"index"` Network string `json:"network"` @@ -32,6 +31,10 @@ func (f *MinipoolPerformanceFile_v1) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *MinipoolPerformanceFile_v1) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for minipool performance files") +} + // Serialize a minipool performance file into bytes designed for human readability func (f *MinipoolPerformanceFile_v1) SerializeHuman() ([]byte, error) { return json.MarshalIndent(f, "", "\t") @@ -88,6 +91,18 @@ func (p *SmoothingPoolMinipoolPerformance_v1) GetMissingAttestationSlots() []uin func (p *SmoothingPoolMinipoolPerformance_v1) GetEthEarned() *big.Int { return eth.EthToWei(p.EthEarned) } +func (p *SmoothingPoolMinipoolPerformance_v1) GetBonusEthEarned() *big.Int { + return big.NewInt(0) +} +func (p *SmoothingPoolMinipoolPerformance_v1) GetEffectiveCommission() *big.Int { + return big.NewInt(0) +} +func (p *SmoothingPoolMinipoolPerformance_v1) GetConsensusIncome() *big.Int { + return big.NewInt(0) +} +func (p *SmoothingPoolMinipoolPerformance_v1) GetAttestationScore() *big.Int { + return big.NewInt(0) +} // Node operator rewards type NodeRewardsInfo_v1 struct { @@ -100,21 +115,13 @@ type NodeRewardsInfo_v1 struct { MerkleProof []string `json:"merkleProof"` } -func (i *NodeRewardsInfo_v1) GetRewardNetwork() uint64 { - return i.RewardNetwork -} -func (i
*NodeRewardsInfo_v1) GetCollateralRpl() *QuotedBigInt { - return i.CollateralRpl -} -func (i *NodeRewardsInfo_v1) GetOracleDaoRpl() *QuotedBigInt { - return i.OracleDaoRpl -} -func (i *NodeRewardsInfo_v1) GetSmoothingPoolEth() *QuotedBigInt { - return i.SmoothingPoolEth -} -func (n *NodeRewardsInfo_v1) GetMerkleProof() ([]common.Hash, error) { - proof := []common.Hash{} - for _, proofLevel := range n.MerkleProof { +func (f *RewardsFile_v1) GetMerkleProof(addr common.Address) ([]common.Hash, error) { + nr, ok := f.getNodeRewardsInfo(addr) + if !ok { + return nil, nil + } + proof := make([]common.Hash, 0, len(nr.MerkleProof)) + for _, proofLevel := range nr.MerkleProof { proof = append(proof, common.HexToHash(proofLevel)) } return proof, nil @@ -132,14 +139,98 @@ func (f *RewardsFile_v1) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *RewardsFile_v1) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for rewards file v1") +} + // Deserialize a rewards file from bytes func (f *RewardsFile_v1) Deserialize(bytes []byte) error { return json.Unmarshal(bytes, &f) } -// Get the rewards file's header -func (f *RewardsFile_v1) GetHeader() *RewardsFileHeader { - return f.RewardsFileHeader +// Get the rewards file version +func (f *RewardsFile_v1) GetRewardsFileVersion() uint64 { + return rewardsFileVersionOne +} + +// Get the rewards file index +func (f *RewardsFile_v1) GetIndex() uint64 { + return f.RewardsFileHeader.Index +} + +// Get the TotalNodeWeight (only added in v3) +func (f *RewardsFile_v1) GetTotalNodeWeight() *big.Int { + return nil +} + +// Get the merkle root +func (f *RewardsFile_v1) GetMerkleRoot() string { + return f.RewardsFileHeader.MerkleRoot +} + +// Get network rewards for a specific network +func (f *RewardsFile_v1) GetNetworkRewards(network uint64) *NetworkRewardsInfo { + return f.RewardsFileHeader.NetworkRewards[network] +} + +// Get the number of intervals that have passed +func (f *RewardsFile_v1) GetIntervalsPassed() uint64 { + return f.RewardsFileHeader.IntervalsPassed +} + +// Get the total RPL sent to the pDAO +func (f *RewardsFile_v1) GetTotalProtocolDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int +} + +// Get the total RPL sent to the oDAO +func (f *RewardsFile_v1) GetTotalOracleDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalOracleDaoRpl.Int +} + +// Get the total Eth sent to pool stakers from the SP +func (f *RewardsFile_v1) GetTotalPoolStakerSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +// Get the total rpl sent to stakers +func (f *RewardsFile_v1) GetTotalCollateralRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalCollateralRpl.Int +} + +// Get the total smoothing pool eth sent to node operators +func (f *RewardsFile_v1) GetTotalNodeOperatorSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +// Get the execution start block +func (f *RewardsFile_v1) GetExecutionStartBlock() uint64 { + return f.RewardsFileHeader.ExecutionStartBlock +} + +// Get the consensus start block +func (f *RewardsFile_v1) GetConsensusStartBlock() uint64 { + return f.RewardsFileHeader.ConsensusStartBlock +} + +// Get the execution end block +func (f *RewardsFile_v1) GetExecutionEndBlock() uint64 { + return f.RewardsFileHeader.ExecutionEndBlock +} + +// Get the consensus end block +func (f *RewardsFile_v1) GetConsensusEndBlock() uint64 {
+ return f.RewardsFileHeader.ConsensusEndBlock +} + +// Get the start time +func (f *RewardsFile_v1) GetStartTime() time.Time { + return f.RewardsFileHeader.StartTime +} + +// Get the end time +func (f *RewardsFile_v1) GetEndTime() time.Time { + return f.RewardsFileHeader.EndTime } // Get all of the node addresses with rewards in this file @@ -154,15 +245,71 @@ func (f *RewardsFile_v1) GetNodeAddresses() []common.Address { return addresses } -// Get info about a node's rewards -func (f *RewardsFile_v1) GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) { +func (f *RewardsFile_v1) getNodeRewardsInfo(address common.Address) (*NodeRewardsInfo_v1, bool) { rewards, exists := f.NodeRewards[address] return rewards, exists } -// Gets the minipool performance file corresponding to this rewards file -func (f *RewardsFile_v1) GetMinipoolPerformanceFile() IMinipoolPerformanceFile { - return &f.MinipoolPerformanceFile +func (f *RewardsFile_v1) HasRewardsFor(addr common.Address) bool { + _, ok := f.NodeRewards[addr] + return ok +} + +func (f *RewardsFile_v1) GetNodeCollateralRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v1) GetNodeOracleDaoRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v1) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.SmoothingPoolEth.Int +} + +// Getters for network info +func (f *RewardsFile_v1) HasRewardsForNetwork(network uint64) bool { + _, ok := f.NetworkRewards[network] + return ok +} + +func (f *RewardsFile_v1) GetNetworkCollateralRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v1) GetNetworkOracleDaoRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v1) GetNetworkSmoothingPoolEth(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.SmoothingPoolEth.Int } // Sets the CID of the minipool performance file corresponding to this rewards file @@ -171,7 +318,7 @@ func (f *RewardsFile_v1) SetMinipoolPerformanceFileCID(cid string) { } // Generates a merkle tree from the provided rewards map -func (f *RewardsFile_v1) generateMerkleTree() error { +func (f *RewardsFile_v1) GenerateMerkleTree() error { // Generate the leaf data for each node totalData := make([][]byte, 0, len(f.NodeRewards)) for address, rewardsForNode := range f.NodeRewards { diff --git a/shared/services/rewards/rewards-file-v2.go b/shared/services/rewards/rewards-file-v2.go index ae5b2f460..09bfa69cb 100644 --- a/shared/services/rewards/rewards-file-v2.go +++ b/shared/services/rewards/rewards-file-v2.go @@ -13,9 +13,8 @@ import ( "github.com/wealdtech/go-merkletree/keccak256" ) -// Holds information type MinipoolPerformanceFile_v2 struct { - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion"` + RewardsFileVersion uint64 `json:"rewardsFileVersion"` RulesetVersion uint64 `json:"rulesetVersion"` Index uint64 `json:"index"` Network string `json:"network"` @@ -26,6 +25,7 @@ type MinipoolPerformanceFile_v2 struct { ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` 
ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` MinipoolPerformance map[common.Address]*SmoothingPoolMinipoolPerformance_v2 `json:"minipoolPerformance"` + BonusScalar *QuotedBigInt `json:"bonusScalar,omitempty"` } // Serialize a minipool performance file into bytes @@ -33,6 +33,10 @@ func (f *MinipoolPerformanceFile_v2) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *MinipoolPerformanceFile_v2) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for minipool performance files") +} + // Serialize a minipool performance file into bytes designed for human readability func (f *MinipoolPerformanceFile_v2) SerializeHuman() ([]byte, error) { return json.MarshalIndent(f, "", "\t") @@ -69,6 +73,9 @@ type SmoothingPoolMinipoolPerformance_v2 struct { AttestationScore *QuotedBigInt `json:"attestationScore"` MissingAttestationSlots []uint64 `json:"missingAttestationSlots"` EthEarned *QuotedBigInt `json:"ethEarned"` + ConsensusIncome *QuotedBigInt `json:"consensusIncome,omitempty"` + BonusEthEarned *QuotedBigInt `json:"bonusEthEarned,omitempty"` + EffectiveCommission *QuotedBigInt `json:"effectiveCommission,omitempty"` } func (p *SmoothingPoolMinipoolPerformance_v2) GetPubkey() (types.ValidatorPubkey, error) { @@ -86,6 +93,27 @@ func (p *SmoothingPoolMinipoolPerformance_v2) GetMissingAttestationSlots() []uin func (p *SmoothingPoolMinipoolPerformance_v2) GetEthEarned() *big.Int { return &p.EthEarned.Int } +func (p *SmoothingPoolMinipoolPerformance_v2) GetBonusEthEarned() *big.Int { + if p.BonusEthEarned == nil { + return big.NewInt(0) + } + return &p.BonusEthEarned.Int +} +func (p *SmoothingPoolMinipoolPerformance_v2) GetEffectiveCommission() *big.Int { + if p.EffectiveCommission == nil { + return big.NewInt(0) + } + return &p.EffectiveCommission.Int +} +func (p *SmoothingPoolMinipoolPerformance_v2) GetConsensusIncome() *big.Int { + if p.ConsensusIncome == nil { + return big.NewInt(0) + } + return &p.ConsensusIncome.Int +} +func (p *SmoothingPoolMinipoolPerformance_v2) GetAttestationScore() *big.Int { + return &p.AttestationScore.Int +} // Node operator rewards type NodeRewardsInfo_v2 struct { @@ -97,21 +125,13 @@ type NodeRewardsInfo_v2 struct { MerkleProof []string `json:"merkleProof"` } -func (i *NodeRewardsInfo_v2) GetRewardNetwork() uint64 { - return i.RewardNetwork -} -func (i *NodeRewardsInfo_v2) GetCollateralRpl() *QuotedBigInt { - return i.CollateralRpl -} -func (i *NodeRewardsInfo_v2) GetOracleDaoRpl() *QuotedBigInt { - return i.OracleDaoRpl -} -func (i *NodeRewardsInfo_v2) GetSmoothingPoolEth() *QuotedBigInt { - return i.SmoothingPoolEth -} -func (n *NodeRewardsInfo_v2) GetMerkleProof() ([]common.Hash, error) { - proof := []common.Hash{} - for _, proofLevel := range n.MerkleProof { +func (f *RewardsFile_v2) GetMerkleProof(addr common.Address) ([]common.Hash, error) { + nr, ok := f.getNodeRewardsInfo(addr) + if !ok { + return nil, nil + } + proof := make([]common.Hash, 0, len(nr.MerkleProof)) + for _, proofLevel := range nr.MerkleProof { proof = append(proof, common.HexToHash(proofLevel)) } return proof, nil @@ -129,14 +149,98 @@ func (f *RewardsFile_v2) Serialize() ([]byte, error) { return json.Marshal(f) } +func (f *RewardsFile_v2) SerializeSSZ() ([]byte, error) { + return nil, fmt.Errorf("ssz format not implemented for rewards file v2") +} + // Deserialize a rewards file from bytes func (f *RewardsFile_v2) Deserialize(bytes []byte) error { return json.Unmarshal(bytes, &f) } -// Get the rewards file's header -func (f 
*RewardsFile_v2) GetHeader() *RewardsFileHeader { - return f.RewardsFileHeader +// Get the rewards file version +func (f *RewardsFile_v2) GetRewardsFileVersion() uint64 { + return rewardsFileVersionTwo +} + +// Get the rewards file index +func (f *RewardsFile_v2) GetIndex() uint64 { + return f.RewardsFileHeader.Index +} + +// Get the TotalNodeWeight (only added in v3) +func (f *RewardsFile_v2) GetTotalNodeWeight() *big.Int { + return nil +} + +// Get the merkle root +func (f *RewardsFile_v2) GetMerkleRoot() string { + return f.RewardsFileHeader.MerkleRoot +} + +// Get network rewards for a specific network +func (f *RewardsFile_v2) GetNetworkRewards(network uint64) *NetworkRewardsInfo { + return f.RewardsFileHeader.NetworkRewards[network] +} + +// Get the number of intervals that have passed +func (f *RewardsFile_v2) GetIntervalsPassed() uint64 { + return f.RewardsFileHeader.IntervalsPassed +} + +// Get the total RPL sent to the pDAO +func (f *RewardsFile_v2) GetTotalProtocolDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int +} + +// Get the total RPL sent to the oDAO +func (f *RewardsFile_v2) GetTotalOracleDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalOracleDaoRpl.Int +} + +// Get the total Eth sent to pool stakers from the SP +func (f *RewardsFile_v2) GetTotalPoolStakerSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +// Get the total rpl sent to stakers +func (f *RewardsFile_v2) GetTotalCollateralRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalCollateralRpl.Int +} + +// Get the total smoothing pool eth sent to node operators +func (f *RewardsFile_v2) GetTotalNodeOperatorSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +// Get the execution start block +func (f *RewardsFile_v2) GetExecutionStartBlock() uint64 { + return f.RewardsFileHeader.ExecutionStartBlock +} + +// Get the consensus start block +func (f *RewardsFile_v2) GetConsensusStartBlock() uint64 { + return f.RewardsFileHeader.ConsensusStartBlock +} + +// Get the execution end block +func (f *RewardsFile_v2) GetExecutionEndBlock() uint64 { + return f.RewardsFileHeader.ExecutionEndBlock +} + +// Get the consensus end block +func (f *RewardsFile_v2) GetConsensusEndBlock() uint64 { + return f.RewardsFileHeader.ConsensusEndBlock +} + +// Get the start time +func (f *RewardsFile_v2) GetStartTime() time.Time { + return f.RewardsFileHeader.StartTime +} + +// Get the end time +func (f *RewardsFile_v2) GetEndTime() time.Time { + return f.RewardsFileHeader.EndTime } // Get all of the node addresses with rewards in this file @@ -151,15 +255,71 @@ func (f *RewardsFile_v2) GetNodeAddresses() []common.Address { return addresses } -// Get info about a node's rewards -func (f *RewardsFile_v2) GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) { +func (f *RewardsFile_v2) getNodeRewardsInfo(address common.Address) (*NodeRewardsInfo_v2, bool) { rewards, exists := f.NodeRewards[address] return rewards, exists } -// Gets the minipool performance file corresponding to this rewards file -func (f *RewardsFile_v2) GetMinipoolPerformanceFile() IMinipoolPerformanceFile { - return &f.MinipoolPerformanceFile +func (f *RewardsFile_v2) HasRewardsFor(addr common.Address) bool { + _, ok := f.NodeRewards[addr] + return ok +} + +func (f *RewardsFile_v2) GetNodeCollateralRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if
!ok { + return big.NewInt(0) + } + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v2) GetNodeOracleDaoRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v2) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.SmoothingPoolEth.Int +} + +// Getters for network info +func (f *RewardsFile_v2) HasRewardsForNetwork(network uint64) bool { + _, ok := f.NetworkRewards[network] + return ok +} + +func (f *RewardsFile_v2) GetNetworkCollateralRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v2) GetNetworkOracleDaoRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v2) GetNetworkSmoothingPoolEth(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.SmoothingPoolEth.Int } // Sets the CID of the minipool performance file corresponding to this rewards file @@ -168,7 +328,7 @@ func (f *RewardsFile_v2) SetMinipoolPerformanceFileCID(cid string) { } // Generates a merkle tree from the provided rewards map -func (f *RewardsFile_v2) generateMerkleTree() error { +func (f *RewardsFile_v2) GenerateMerkleTree() error { // Generate the leaf data for each node totalData := make([][]byte, 0, len(f.NodeRewards)) for address, rewardsForNode := range f.NodeRewards { diff --git a/shared/services/rewards/rewards-file-v3.go b/shared/services/rewards/rewards-file-v3.go index efeb8f56c..231badc45 100644 --- a/shared/services/rewards/rewards-file-v3.go +++ b/shared/services/rewards/rewards-file-v3.go @@ -8,135 +8,129 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/goccy/go-json" - "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types" "github.com/wealdtech/go-merkletree" "github.com/wealdtech/go-merkletree/keccak256" ) -// Holds information -type MinipoolPerformanceFile_v3 struct { - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion"` - RulesetVersion uint64 `json:"rulesetVersion"` - Index uint64 `json:"index"` - Network string `json:"network"` - StartTime time.Time `json:"startTime,omitempty"` - EndTime time.Time `json:"endTime,omitempty"` - ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` - ConsensusEndBlock uint64 `json:"consensusEndBlock,omitempty"` - ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` - ExecutionEndBlock uint64 `json:"executionEndBlock,omitempty"` - MinipoolPerformance map[common.Address]*SmoothingPoolMinipoolPerformance_v3 `json:"minipoolPerformance"` -} - -// Serialize a minipool performance file into bytes -func (f *MinipoolPerformanceFile_v3) Serialize() ([]byte, error) { +// JSON struct for a complete rewards file +type RewardsFile_v3 struct { + *RewardsFileHeader + NodeRewards map[common.Address]*NodeRewardsInfo_v2 `json:"nodeRewards"` + MinipoolPerformanceFile MinipoolPerformanceFile_v2 `json:"-"` +} + +// Serialize a rewards file into bytes +func (f *RewardsFile_v3) Serialize() ([]byte, error) { return json.Marshal(f) } -// Serialize a minipool performance file into bytes designed for human readability -func (f *MinipoolPerformanceFile_v3) SerializeHuman() ([]byte, error) { - return 
json.MarshalIndent(f, "", "\t") +// Serialize as SSZ +func (f *RewardsFile_v3) SerializeSSZ() ([]byte, error) { + // In order to avoid multiple code paths, we won't bother making a RewardsFile_v3 <-> SSZFile_v1 function + // Instead, we can serialize to json, parse that into SSZFile_v1, and then serialize it as SSZ + data, err := f.Serialize() + if err != nil { + return nil, fmt.Errorf("error converting RewardsFile v3 to json so it could be parsed as SSZFile_v1: %w", err) + } + + s := &ssz_types.SSZFile_v1{} + err = json.Unmarshal(data, s) + if err != nil { + return nil, fmt.Errorf("error parsing RewardsFile v3 json as SSZFile_v1: %w", err) + } + + return s.SerializeSSZ() } -// Deserialize a minipool performance file from bytes -func (f *MinipoolPerformanceFile_v3) Deserialize(bytes []byte) error { +// Deserialize a rewards file from bytes +func (f *RewardsFile_v3) Deserialize(bytes []byte) error { return json.Unmarshal(bytes, &f) } -// Get all of the minipool addresses with rewards in this file -// NOTE: the order of minipool addresses is not guaranteed to be stable, so don't rely on it -func (f *MinipoolPerformanceFile_v3) GetMinipoolAddresses() []common.Address { - addresses := make([]common.Address, len(f.MinipoolPerformance)) - i := 0 - for address := range f.MinipoolPerformance { - addresses[i] = address - i++ - } - return addresses +// Get the rewards file version +func (f *RewardsFile_v3) GetRewardsFileVersion() uint64 { + return rewardsFileVersionThree } -// Get a minipool's smoothing pool performance if it was present -func (f *MinipoolPerformanceFile_v3) GetSmoothingPoolPerformance(minipoolAddress common.Address) (ISmoothingPoolMinipoolPerformance, bool) { - perf, exists := f.MinipoolPerformance[minipoolAddress] - return perf, exists +// Get the rewards file index +func (f *RewardsFile_v3) GetIndex() uint64 { + return f.RewardsFileHeader.Index } -// Minipool stats -type SmoothingPoolMinipoolPerformance_v3 struct { - Pubkey string `json:"pubkey"` - SuccessfulAttestations uint64 `json:"successfulAttestations"` - MissedAttestations uint64 `json:"missedAttestations"` - AttestationScore *QuotedBigInt `json:"attestationScore"` - MissingAttestationSlots []uint64 `json:"missingAttestationSlots"` - EthEarned *QuotedBigInt `json:"ethEarned"` +// Get the TotalNodeWeight (only added in v3) +func (f *RewardsFile_v3) GetTotalNodeWeight() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalNodeWeight.Int } -func (p *SmoothingPoolMinipoolPerformance_v3) GetPubkey() (types.ValidatorPubkey, error) { - return types.HexToValidatorPubkey(p.Pubkey) -} -func (p *SmoothingPoolMinipoolPerformance_v3) GetSuccessfulAttestationCount() uint64 { - return p.SuccessfulAttestations +// Get the merkle root +func (f *RewardsFile_v3) GetMerkleRoot() string { + return f.RewardsFileHeader.MerkleRoot } -func (p *SmoothingPoolMinipoolPerformance_v3) GetMissedAttestationCount() uint64 { - return p.MissedAttestations + +// Get network rewards for a specific network +func (f *RewardsFile_v3) GetNetworkRewards(network uint64) *NetworkRewardsInfo { + return f.RewardsFileHeader.NetworkRewards[network] } -func (p *SmoothingPoolMinipoolPerformance_v3) GetMissingAttestationSlots() []uint64 { - return p.MissingAttestationSlots + +// Get the number of intervals that have passed +func (f *RewardsFile_v3) GetIntervalsPassed() uint64 { + return f.RewardsFileHeader.IntervalsPassed } -func (p *SmoothingPoolMinipoolPerformance_v3) GetEthEarned() *big.Int { - return &p.EthEarned.Int +
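The v3 header getters here are deliberately thin: each one just dereferences a field on the embedded RewardsFileHeader. A minimal consumer sketch, assuming `data` already holds the JSON contents of a rewards tree file (the variable names are illustrative, not part of this change):

    // Parse a v3 rewards file and read the interval-level totals.
    f := &RewardsFile_v3{}
    if err := f.Deserialize(data); err != nil {
        return fmt.Errorf("error parsing rewards file: %w", err)
    }
    fmt.Printf("interval %d, merkle root %s\n", f.GetIndex(), f.GetMerkleRoot())
    fmt.Printf("total node weight: %s\n", f.GetTotalNodeWeight())
    fmt.Printf("pDAO RPL: %s\n", f.GetTotalProtocolDaoRpl())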
+// Get the total RPL sent to the pDAO +func (f *RewardsFile_v3) GetTotalProtocolDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.ProtocolDaoRpl.Int } -// Node operator rewards -type NodeRewardsInfo_v3 struct { - RewardNetwork uint64 `json:"rewardNetwork"` - CollateralRpl *QuotedBigInt `json:"collateralRpl"` - OracleDaoRpl *QuotedBigInt `json:"oracleDaoRpl"` - SmoothingPoolEth *QuotedBigInt `json:"smoothingPoolEth"` - MerkleData []byte `json:"-"` - MerkleProof []string `json:"merkleProof"` +// Get the total RPL sent to the oDAO +func (f *RewardsFile_v3) GetTotalOracleDaoRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalOracleDaoRpl.Int } -func (i *NodeRewardsInfo_v3) GetRewardNetwork() uint64 { - return i.RewardNetwork +// Get the total ETH sent to pool stakers from the SP +func (f *RewardsFile_v3) GetTotalPoolStakerSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.PoolStakerSmoothingPoolEth.Int } -func (i *NodeRewardsInfo_v3) GetCollateralRpl() *QuotedBigInt { - return i.CollateralRpl + +// Get the total collateral RPL sent to stakers +func (f *RewardsFile_v3) GetTotalCollateralRpl() *big.Int { + return &f.RewardsFileHeader.TotalRewards.TotalCollateralRpl.Int } -func (i *NodeRewardsInfo_v3) GetOracleDaoRpl() *QuotedBigInt { - return i.OracleDaoRpl + +// Get the total smoothing pool ETH sent to node operators +func (f *RewardsFile_v3) GetTotalNodeOperatorSmoothingPoolEth() *big.Int { + return &f.RewardsFileHeader.TotalRewards.NodeOperatorSmoothingPoolEth.Int } -func (i *NodeRewardsInfo_v3) GetSmoothingPoolEth() *QuotedBigInt { - return i.SmoothingPoolEth + +// Get the execution end block +func (f *RewardsFile_v3) GetExecutionEndBlock() uint64 { + return f.RewardsFileHeader.ExecutionEndBlock } -func (n *NodeRewardsInfo_v3) GetMerkleProof() ([]common.Hash, error) { - proof := []common.Hash{} - for _, proofLevel := range n.MerkleProof { - proof = append(proof, common.HexToHash(proofLevel)) - } - return proof, nil + +// Get the consensus end block +func (f *RewardsFile_v3) GetConsensusEndBlock() uint64 { + return f.RewardsFileHeader.ConsensusEndBlock } -// JSON struct for a complete rewards file -type RewardsFile_v3 struct { - *RewardsFileHeader - NodeRewards map[common.Address]*NodeRewardsInfo_v3 `json:"nodeRewards"` - MinipoolPerformanceFile MinipoolPerformanceFile_v3 `json:"-"` +// Get the execution start block +func (f *RewardsFile_v3) GetExecutionStartBlock() uint64 { + return f.RewardsFileHeader.ExecutionStartBlock } -// Serialize a rewards file into bytes -func (f *RewardsFile_v3) Serialize() ([]byte, error) { - return json.Marshal(f) +// Get the consensus start block +func (f *RewardsFile_v3) GetConsensusStartBlock() uint64 { + return f.RewardsFileHeader.ConsensusStartBlock } -// Deserialize a rewards file from bytes -func (f *RewardsFile_v3) Deserialize(bytes []byte) error { - return json.Unmarshal(bytes, &f) +// Get the start time +func (f *RewardsFile_v3) GetStartTime() time.Time { + return f.RewardsFileHeader.StartTime } -// Get the rewards file's header -func (f *RewardsFile_v3) GetHeader() *RewardsFileHeader { - return f.RewardsFileHeader +// Get the end time +func (f *RewardsFile_v3) GetEndTime() time.Time { + return f.RewardsFileHeader.EndTime } // Get all of the node addresses with rewards in this file @@ -151,15 +145,83 @@ func (f *RewardsFile_v3) GetNodeAddresses() []common.Address { return addresses }
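Unlike the removed GetNodeRewardsInfo, the per-node getters below never return nil: an unknown address yields a zero-valued big.Int, so callers can aggregate without nil checks. A short usage sketch, assuming `f` is a deserialized RewardsFile_v3 and `nodeAddress` is the claiming node (both names are hypothetical):

    // Read one node's claimable amounts and its Merkle proof.
    if f.HasRewardsFor(nodeAddress) {
        rpl := f.GetNodeCollateralRpl(nodeAddress)    // zero if absent
        eth := f.GetNodeSmoothingPoolEth(nodeAddress) // zero if absent
        proof, err := f.GetMerkleProof(nodeAddress)
        if err != nil {
            return err
        }
        fmt.Printf("claim: %s RPL, %s wei, proof length %d\n", rpl, eth, len(proof))
    }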
-// Get info about a node's rewards -func (f *RewardsFile_v3) GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) { +func (f *RewardsFile_v3) getNodeRewardsInfo(address common.Address) (*NodeRewardsInfo_v2, bool) { rewards, exists := f.NodeRewards[address] return rewards, exists } -// Gets the minipool performance file corresponding to this rewards file -func (f *RewardsFile_v3) GetMinipoolPerformanceFile() IMinipoolPerformanceFile { - return &f.MinipoolPerformanceFile +func (f *RewardsFile_v3) HasRewardsFor(addr common.Address) bool { + _, ok := f.NodeRewards[addr] + return ok +} + +func (f *RewardsFile_v3) GetNodeCollateralRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v3) GetNodeOracleDaoRpl(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v3) GetNodeSmoothingPoolEth(addr common.Address) *big.Int { + nr, ok := f.NodeRewards[addr] + if !ok { + return big.NewInt(0) + } + return &nr.SmoothingPoolEth.Int +} + +func (f *RewardsFile_v3) GetMerkleProof(addr common.Address) ([]common.Hash, error) { + nr, ok := f.getNodeRewardsInfo(addr) + if !ok { + return nil, nil + } + proof := make([]common.Hash, 0, len(nr.MerkleProof)) + for _, proofLevel := range nr.MerkleProof { + proof = append(proof, common.HexToHash(proofLevel)) + } + return proof, nil +} + +// Getters for network info +func (f *RewardsFile_v3) HasRewardsForNetwork(network uint64) bool { + _, ok := f.NetworkRewards[network] + return ok +} + +func (f *RewardsFile_v3) GetNetworkCollateralRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.CollateralRpl.Int +} + +func (f *RewardsFile_v3) GetNetworkOracleDaoRpl(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.OracleDaoRpl.Int +} + +func (f *RewardsFile_v3) GetNetworkSmoothingPoolEth(network uint64) *big.Int { + nr, ok := f.NetworkRewards[network] + if !ok { + return big.NewInt(0) + } + + return &nr.SmoothingPoolEth.Int } // Sets the CID of the minipool performance file corresponding to this rewards file @@ -168,7 +230,7 @@ func (f *RewardsFile_v3) SetMinipoolPerformanceFileCID(cid string) { } // Generates a merkle tree from the provided rewards map -func (f *RewardsFile_v3) generateMerkleTree() error { +func (f *RewardsFile_v3) GenerateMerkleTree() error { // Generate the leaf data for each node totalData := make([][]byte, 0, len(f.NodeRewards)) for address, rewardsForNode := range f.NodeRewards { diff --git a/shared/services/rewards/rolling-manager.go b/shared/services/rewards/rolling-manager.go deleted file mode 100644 index 79bbdd7c2..000000000 --- a/shared/services/rewards/rolling-manager.go +++ /dev/null @@ -1,645 +0,0 @@ -package rewards - -import ( - "bytes" - "crypto/sha512" - "encoding/hex" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/blang/semver/v4" - "github.com/klauspost/compress/zstd" - rprewards "github.com/rocket-pool/rocketpool-go/rewards" - "github.com/rocket-pool/rocketpool-go/rocketpool" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/config" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/utils/log" -) - -const ( - recordsFilenameFormat string = "%d-%d.json.zst" - recordsFilenamePattern string = "(?P<slot>\\d+)\\-(?P<epoch>\\d+)\\.json\\.zst" - latestCompatibleVersionString string =
"1.11.0-dev" -) - -// Manager for RollingRecords -type RollingRecordManager struct { - Record *RollingRecord - LatestFinalizedEpoch uint64 - ExpectedBalancesBlock uint64 - ExpectedRewardsIntervalBlock uint64 - - log *log.ColorLogger - errLog *log.ColorLogger - logPrefix string - cfg *config.RocketPoolConfig - rp *rocketpool.RocketPool - bc beacon.Client - mgr *state.NetworkStateManager - startSlot uint64 - nextEpochToSave uint64 - - beaconCfg beacon.Eth2Config - genesisTime time.Time - compressor *zstd.Encoder - decompressor *zstd.Decoder - recordsFilenameRegex *regexp.Regexp -} - -// Creates a new manager for rolling records. -func NewRollingRecordManager(log *log.ColorLogger, errLog *log.ColorLogger, cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, bc beacon.Client, mgr *state.NetworkStateManager, startSlot uint64, beaconCfg beacon.Eth2Config, rewardsInterval uint64) (*RollingRecordManager, error) { - // Get the Beacon genesis time - genesisTime := time.Unix(int64(beaconCfg.GenesisTime), 0) - - // Create the zstd compressor and decompressor - encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression)) - if err != nil { - return nil, fmt.Errorf("error creating zstd compressor for rolling record manager: %w", err) - } - decoder, err := zstd.NewReader(nil) - if err != nil { - return nil, fmt.Errorf("error creating zstd decompressor for rolling record manager: %w", err) - } - - // Create the records filename regex - recordsFilenameRegex := regexp.MustCompile(recordsFilenamePattern) - - // Make the records folder if it doesn't exist - recordsPath := cfg.Smartnode.GetRecordsPath() - fileInfo, err := os.Stat(recordsPath) - if os.IsNotExist(err) { - err2 := os.MkdirAll(recordsPath, 0755) - if err2 != nil { - return nil, fmt.Errorf("error creating rolling records folder: %w", err) - } - } else if err != nil { - return nil, fmt.Errorf("error checking rolling records folder: %w", err) - } else if !fileInfo.IsDir() { - return nil, fmt.Errorf("rolling records folder location exists (%s), but is not a folder", recordsPath) - } - - logPrefix := "[Rolling Record]" - log.Printlnf("%s Created Rolling Record manager for start slot %d.", logPrefix, startSlot) - return &RollingRecordManager{ - Record: NewRollingRecord(log, logPrefix, bc, startSlot, &beaconCfg, rewardsInterval), - - log: log, - errLog: errLog, - logPrefix: logPrefix, - cfg: cfg, - rp: rp, - bc: bc, - mgr: mgr, - startSlot: startSlot, - beaconCfg: beaconCfg, - genesisTime: genesisTime, - compressor: encoder, - decompressor: decoder, - recordsFilenameRegex: recordsFilenameRegex, - }, nil -} - -// Generate a new record for the provided slot using the latest viable saved record -func (r *RollingRecordManager) GenerateRecordForState(state *state.NetworkState) (*RollingRecord, error) { - // Load the latest viable record - slot := state.BeaconSlotNumber - rewardsInterval := state.NetworkDetails.RewardIndex - record, err := r.LoadBestRecordFromDisk(r.startSlot, slot, rewardsInterval) - if err != nil { - return nil, fmt.Errorf("error loading best record for slot %d: %w", slot, err) - } - - if record.LastDutiesSlot == slot { - // Already have a full snapshot so we don't have to do anything - r.log.Printf("%s Loaded record was already up-to-date for slot %d.", r.logPrefix, slot) - return record, nil - } else if record.LastDutiesSlot > slot { - // This should never happen but sanity check it anyway - return nil, fmt.Errorf("loaded record has duties completed for slot %d, which is too far forward (targeting slot %d)", 
record.LastDutiesSlot, slot) - } - - // Update to the target slot - err = r.UpdateRecordToState(state, slot) - if err != nil { - return nil, fmt.Errorf("error updating record to slot %d: %w", slot, err) - } - - return record, nil -} - -// Save the rolling record to a file and update the record info catalog -func (r *RollingRecordManager) SaveRecordToFile(record *RollingRecord) error { - - // Serialize the record - bytes, err := record.Serialize() - if err != nil { - return fmt.Errorf("error saving rolling record: %w", err) - } - - // Compress the record - compressedBytes := r.compressor.EncodeAll(bytes, make([]byte, 0, len(bytes))) - - // Get the record filename - slot := record.LastDutiesSlot - epoch := record.LastDutiesSlot / r.beaconCfg.SlotsPerEpoch - recordsPath := r.cfg.Smartnode.GetRecordsPath() - filename := filepath.Join(recordsPath, fmt.Sprintf(recordsFilenameFormat, slot, epoch)) - - // Write it to a file - err = os.WriteFile(filename, compressedBytes, 0664) - if err != nil { - return fmt.Errorf("error writing file [%s]: %w", filename, err) - } - - // Compute the SHA384 hash to act as a checksum - checksum := sha512.Sum384(compressedBytes) - - // Load the existing checksum table - _, lines, err := r.parseChecksumFile() - if err != nil { - return fmt.Errorf("error parsing checkpoint file: %w", err) - } - if lines == nil { - lines = []string{} - } - - // Add the new record checksum - baseFilename := filepath.Base(filename) - checksumLine := fmt.Sprintf("%s %s", hex.EncodeToString(checksum[:]), baseFilename) - - // Sort the lines by their slot - err = r.sortChecksumEntries(lines) - if err != nil { - return fmt.Errorf("error sorting checkpoint file entries: %w", err) - } - - overwritten := false - for i, line := range lines { - if strings.HasSuffix(line, baseFilename) { - // If there is already a line with the filename, overwrite it - lines[i] = checksumLine - overwritten = true - break - } - } - if !overwritten { - // If there's no existing lines, add this to the end - lines = append(lines, checksumLine) - } - - // Get the number of lines to write - checkpointRetentionLimit := r.cfg.Smartnode.CheckpointRetentionLimit.Value.(uint64) - var newLines []string - if len(lines) > int(checkpointRetentionLimit) { - numberOfNewLines := int(checkpointRetentionLimit) - cullCount := len(lines) - numberOfNewLines - - // Remove old lines and delete the corresponding files that shouldn't be retained - for i := 0; i < cullCount; i++ { - line := lines[i] - - // Extract the filename - elems := strings.Split(line, " ") - if len(elems) != 2 { - return fmt.Errorf("error parsing checkpoint line (%s): expected 2 elements, but got %d", line, len(elems)) - } - filename := elems[1] - fullFilename := filepath.Join(recordsPath, filename) - - // Delete the file if it exists - _, err := os.Stat(fullFilename) - if os.IsNotExist(err) { - r.log.Printlnf("%s NOTE: tried removing checkpoint file [%s] based on the retention limit, but it didn't exist.", r.logPrefix, filename) - continue - } - err = os.Remove(fullFilename) - if err != nil { - return fmt.Errorf("error deleting file [%s]: %w", fullFilename, err) - } - - r.log.Printlnf("%s Removed checkpoint file [%s] based on the retention limit.", r.logPrefix, filename) - } - - // Store the rest - newLines = make([]string, numberOfNewLines) - for i := cullCount; i <= numberOfNewLines; i++ { - newLines[i-cullCount] = lines[i] - } - } else { - newLines = lines - } - - fileContents := strings.Join(newLines, "\n") - checksumBytes := []byte(fileContents) - - // Save the new 
file - checksumFilename := filepath.Join(recordsPath, config.ChecksumTableFilename) - err = os.WriteFile(checksumFilename, checksumBytes, 0644) - if err != nil { - return fmt.Errorf("error writing checksum file after culling: %w", err) - } - - return nil -} - -// Load the most recent appropriate rolling record from disk, using the checksum table as an index -func (r *RollingRecordManager) LoadBestRecordFromDisk(startSlot uint64, targetSlot uint64, rewardsInterval uint64) (*RollingRecord, error) { - recordCheckpointInterval := r.cfg.Smartnode.RecordCheckpointInterval.Value.(uint64) - latestCompatibleVersion, err := semver.New(latestCompatibleVersionString) - if err != nil { - return nil, fmt.Errorf("error parsing latest compatible version string [%s]: %w", latestCompatibleVersionString, err) - } - - // Parse the checksum file - exists, lines, err := r.parseChecksumFile() - if err != nil { - return nil, fmt.Errorf("error parsing checkpoint file: %w", err) - } - if !exists { - // There isn't a checksum file so start over - r.log.Printlnf("%s Checksum file not found, creating a new record from the start of the interval.", r.logPrefix) - record := NewRollingRecord(r.log, r.logPrefix, r.bc, startSlot, &r.beaconCfg, rewardsInterval) - r.Record = record - r.nextEpochToSave = startSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - 1 - return record, nil - } - - // Iterate over each file, counting backwards from the bottom - recordsPath := r.cfg.Smartnode.GetRecordsPath() - for i := len(lines) - 1; i >= 0; i-- { - line := lines[i] - - // Extract the checksum, filename, and slot number - checksumString, filename, slot, err := r.parseChecksumEntry(line) - if err != nil { - return nil, err - } - - // Check if the slot was too far into the future - if slot > targetSlot { - r.log.Printlnf("%s File [%s] was too far into the future, trying an older one...", r.logPrefix, filename) - continue - } - - // Check if it was too far into the past - if slot < startSlot { - r.log.Printlnf("%s File [%s] was too old (generated before the target start slot), none of the remaining records can be used.", r.logPrefix, filename) - break - } - - // Make sure the checksum parses properly - checksum, err := hex.DecodeString(checksumString) - if err != nil { - return nil, fmt.Errorf("error scanning checkpoint line (%s): checksum (%s) could not be parsed", line, checksumString) - } - - // Try to load it - fullFilename := filepath.Join(recordsPath, filename) - record, err := r.loadRecordFromFile(fullFilename, checksum) - if err != nil { - r.log.Printlnf("%s WARNING: error loading record from file [%s]: %s... 
attempting previous file", r.logPrefix, fullFilename, err.Error()) - continue - } - - // Check if it was for the proper interval - if record.RewardsInterval != rewardsInterval { - r.log.Printlnf("%s File [%s] was for rewards interval %d instead of %d so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename, record.RewardsInterval, rewardsInterval) - continue - } - - // Check if it has the proper start slot - if record.StartSlot != startSlot { - r.log.Printlnf("%s File [%s] started on slot %d instead of %d so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename, record.StartSlot, startSlot) - continue - } - - // Check if it's using a compatible version - recordVersionString := record.SmartnodeVersion - if recordVersionString == "" { - recordVersionString = "1.10.0" // First release without version info - } - recordVersion, err := semver.New(recordVersionString) - if err != nil { - r.log.Printlnf("%s Failed to parse the version info for file [%s] so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename) - continue - } - if recordVersion.LT(*latestCompatibleVersion) { - r.log.Printlnf("%s File [%s] was made with Smartnode v%s which is not compatible (lowest compatible = v%s) so it cannot be used, trying an earlier checkpoint.", r.logPrefix, filename, recordVersionString, latestCompatibleVersionString) - continue - } - - epoch := slot / r.beaconCfg.SlotsPerEpoch - r.log.Printlnf("%s Loaded file [%s] which ended on slot %d (epoch %d) for rewards interval %d.", r.logPrefix, filename, slot, epoch, record.RewardsInterval) - r.Record = record - r.nextEpochToSave = record.LastDutiesSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - return record, nil - - } - - // If we got here then none of the saved files worked so we have to make a new record - r.log.Printlnf("%s None of the saved record checkpoint files were eligible for use, creating a new record from the start of the interval.", r.logPrefix) - record := NewRollingRecord(r.log, r.logPrefix, r.bc, startSlot, &r.beaconCfg, rewardsInterval) - r.Record = record - r.nextEpochToSave = startSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - 1 - return record, nil - -} - -// Updates the manager's record to the provided state, retrying upon errors until success -func (r *RollingRecordManager) UpdateRecordToState(state *state.NetworkState, latestFinalizedSlot uint64) error { - err := r.updateImpl(state, latestFinalizedSlot) - if err != nil { - // Revert to the latest saved state - r.log.Printlnf("%s WARNING: failed to update rolling record to slot %d, block %d: %s", r.logPrefix, state.BeaconSlotNumber, state.ElBlockNumber, err.Error()) - r.log.Printlnf("%s Reverting to the last saved checkpoint to prevent corruption...", r.logPrefix) - _, err2 := r.LoadBestRecordFromDisk(r.startSlot, latestFinalizedSlot, r.Record.RewardsInterval) - if err2 != nil { - return fmt.Errorf("error loading last best checkpoint: %w", err) - } - - // Try again - r.log.Printlnf("%s Successfully reverted to the last saved state.", r.logPrefix) - return err - } - - return nil -} - -// Updates the manager's record to the provided state -func (r *RollingRecordManager) updateImpl(state *state.NetworkState, latestFinalizedSlot uint64) error { - var err error - r.log.Printlnf("Updating record to target slot %d...", latestFinalizedSlot) - - // Create a new record if the current one is for the previous rewards interval - if r.Record.RewardsInterval < state.NetworkDetails.RewardIndex { - err := 
r.createNewRecord(state) - if err != nil { - return fmt.Errorf("error creating new record: %w", err) - } - } - - // Get the state for the target slot - recordCheckpointInterval := r.cfg.Smartnode.RecordCheckpointInterval.Value.(uint64) - finalTarget := latestFinalizedSlot - finalizedState := state - if finalTarget != state.BeaconSlotNumber { - finalizedState, err = r.mgr.GetStateForSlot(finalTarget) - if err != nil { - return fmt.Errorf("error getting state for latest finalized slot (%d): %w", finalTarget, err) - } - } - - // Break the routine into chunks so it can be saved if necessary - nextStartSlot := r.Record.LastDutiesSlot + 1 - if r.Record.LastDutiesSlot == 0 { - nextStartSlot = r.startSlot - } - - nextStartEpoch := nextStartSlot / r.beaconCfg.SlotsPerEpoch - finalEpoch := finalTarget / r.beaconCfg.SlotsPerEpoch - - nextTargetEpoch := finalEpoch - if nextTargetEpoch > r.nextEpochToSave { - // Make a stop at the next required checkpoint so it can be saved - nextTargetEpoch = r.nextEpochToSave - } - nextTargetSlot := (nextTargetEpoch+1)*r.beaconCfg.SlotsPerEpoch - 1 // Target is the last slot of the epoch - if nextTargetSlot > finalTarget { - nextTargetSlot = finalTarget - } - totalSlots := float64(finalTarget - nextStartSlot + 1) - initialSlot := nextStartSlot - - r.log.Printlnf("%s Collecting records from slot %d (epoch %d) to slot %d (epoch %d).", r.logPrefix, nextStartSlot, nextStartEpoch, finalTarget, finalEpoch) - startTime := time.Now() - for { - if nextStartSlot > finalTarget { - break - } - - // Update the record to the target state - err = r.Record.UpdateToSlot(nextTargetSlot, finalizedState) - if err != nil { - return fmt.Errorf("error updating rolling record to slot %d, block %d: %w", state.BeaconSlotNumber, state.ElBlockNumber, err) - } - slotsProcessed := nextTargetSlot - initialSlot + 1 - r.log.Printf("%s (%.2f%%) Updated from slot %d (epoch %d) to slot %d (epoch %d)... 
(%s so far) ", r.logPrefix, float64(slotsProcessed)/totalSlots*100.0, nextStartSlot, nextStartEpoch, nextTargetSlot, nextTargetEpoch, time.Since(startTime)) - - // Save if required - if nextTargetEpoch == r.nextEpochToSave { - err = r.SaveRecordToFile(r.Record) - if err != nil { - return fmt.Errorf("error saving record: %w", err) - } - r.log.Printlnf("%s Saved record checkpoint.", r.logPrefix) - r.nextEpochToSave += recordCheckpointInterval // Set the next epoch to save 1 checkpoint in the future - } - - nextStartSlot = nextTargetSlot + 1 - nextStartEpoch = nextStartSlot / r.beaconCfg.SlotsPerEpoch - nextTargetEpoch = finalEpoch - if nextTargetEpoch > r.nextEpochToSave { - // Make a stop at the next required checkpoint so it can be saved - nextTargetEpoch = r.nextEpochToSave - } - nextTargetSlot = (nextTargetEpoch+1)*r.beaconCfg.SlotsPerEpoch - 1 // Target is the last slot of the epoch - if nextTargetSlot > finalTarget { - nextTargetSlot = finalTarget - } - } - - // Log the update - startEpoch := r.Record.StartSlot / r.beaconCfg.SlotsPerEpoch - currentEpoch := r.Record.LastDutiesSlot / r.beaconCfg.SlotsPerEpoch - r.log.Printlnf("%s Record update complete (slot %d-%d, epoch %d-%d).", r.logPrefix, r.Record.StartSlot, r.Record.LastDutiesSlot, startEpoch, currentEpoch) - - return nil -} - -// Prepares the record for a rewards interval report -func (r *RollingRecordManager) PrepareRecordForReport(state *state.NetworkState) error { - rewardsSlot := state.BeaconSlotNumber - - // Check if the current record has gone past the requested slot or if it can be updated / used - if rewardsSlot < r.Record.LastDutiesSlot { - r.log.Printlnf("%s Current record has extended too far (need slot %d, but record has processed slot %d)... reverting to a previous checkpoint.", r.logPrefix, rewardsSlot, r.Record.LastDutiesSlot) - - newRecord, err := r.GenerateRecordForState(state) - if err != nil { - return fmt.Errorf("error creating record for rewards slot: %w", err) - } - - r.Record = newRecord - } else { - r.log.Printlnf("%s Current record can be used (need slot %d, record has only processed slot %d), updating to target slot.", r.logPrefix, rewardsSlot, r.Record.LastDutiesSlot) - err := r.UpdateRecordToState(state, rewardsSlot) - if err != nil { - return fmt.Errorf("error updating record to rewards slot: %w", err) - } - } - - return nil -} - -// Get the slot number from a record filename -func (r *RollingRecordManager) getSlotFromFilename(filename string) (uint64, error) { - matches := r.recordsFilenameRegex.FindStringSubmatch(filename) - if matches == nil { - return 0, fmt.Errorf("filename (%s) did not match the expected format", filename) - } - slotIndex := r.recordsFilenameRegex.SubexpIndex("slot") - if slotIndex == -1 { - return 0, fmt.Errorf("slot number not found in filename (%s)", filename) - } - slotString := matches[slotIndex] - slot, err := strconv.ParseUint(slotString, 10, 64) - if err != nil { - return 0, fmt.Errorf("slot (%s) could not be parsed to a number", slotString) - } - - return slot, nil -} - -// Load a record from a file, making sure its contents match the provided checksum -func (r *RollingRecordManager) loadRecordFromFile(filename string, expectedChecksum []byte) (*RollingRecord, error) { - // Read the file - compressedBytes, err := os.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("error reading file: %w", err) - } - - // Calculate the hash and validate it - checksum := sha512.Sum384(compressedBytes) - if !bytes.Equal(expectedChecksum, checksum[:]) { - expectedString := 
hex.EncodeToString(expectedChecksum) - actualString := hex.EncodeToString(checksum[:]) - return nil, fmt.Errorf("checksum mismatch (expected %s, but it was %s)", expectedString, actualString) - } - - // Decompress it - bytes, err := r.decompressor.DecodeAll(compressedBytes, []byte{}) - if err != nil { - return nil, fmt.Errorf("error decompressing data: %w", err) - } - - // Create a new record from the data - return DeserializeRollingRecord(r.log, r.logPrefix, r.bc, &r.beaconCfg, bytes) -} - -// Get the lines from the checksum file -func (r *RollingRecordManager) parseChecksumFile() (bool, []string, error) { - // Get the checksum filename - recordsPath := r.cfg.Smartnode.GetRecordsPath() - checksumFilename := filepath.Join(recordsPath, config.ChecksumTableFilename) - - // Check if the file exists - _, err := os.Stat(checksumFilename) - if os.IsNotExist(err) { - return false, nil, nil - } - - // Open the checksum file - checksumTable, err := os.ReadFile(checksumFilename) - if err != nil { - return false, nil, fmt.Errorf("error loading checksum table (%s): %w", checksumFilename, err) - } - - // Parse out each line - originalLines := strings.Split(string(checksumTable), "\n") - - // Remove empty lines - lines := make([]string, 0, len(originalLines)) - for _, line := range originalLines { - trimmedLine := strings.TrimSpace(line) - if trimmedLine != "" { - lines = append(lines, line) - } - } - - return true, lines, nil -} - -// Sort the checksum file entries by their slot -func (r *RollingRecordManager) sortChecksumEntries(lines []string) error { - var sortErr error - sort.Slice(lines, func(i int, j int) bool { - _, _, firstSlot, err := r.parseChecksumEntry(lines[i]) - if err != nil && sortErr == nil { - sortErr = err - return false - } - - _, _, secondSlot, err := r.parseChecksumEntry(lines[j]) - if err != nil && sortErr == nil { - sortErr = err - return false - } - - return firstSlot < secondSlot - }) - return sortErr -} - -// Get the checksum, the filename, and the slot number from a checksum entry. 
-func (r *RollingRecordManager) parseChecksumEntry(line string) (string, string, uint64, error) { - // Extract the checksum and filename - elems := strings.Split(line, " ") - if len(elems) != 2 { - return "", "", 0, fmt.Errorf("error parsing checkpoint line (%s): expected 2 elements, but got %d", line, len(elems)) - } - checksumString := elems[0] - filename := elems[1] - - // Extract the slot number for this file - slot, err := r.getSlotFromFilename(filename) - if err != nil { - return "", "", 0, fmt.Errorf("error scanning checkpoint line (%s): %w", line, err) - } - - return checksumString, filename, slot, nil -} - -// Creates a new record -func (r *RollingRecordManager) createNewRecord(state *state.NetworkState) error { - // Get the current interval index - currentIndexBig, err := rprewards.GetRewardIndex(r.rp, nil) - if err != nil { - return fmt.Errorf("error getting rewards index: %w", err) - } - currentIndex := currentIndexBig.Uint64() - - // Get the previous RocketRewardsPool addresses - prevAddresses := r.cfg.Smartnode.GetPreviousRewardsPoolAddresses() - - // Get the last rewards event and starting epoch - found, event, err := rprewards.GetRewardsEvent(r.rp, currentIndex-1, prevAddresses, nil) - if err != nil { - return fmt.Errorf("error getting event for rewards interval %d: %w", currentIndex-1, err) - } - if !found { - return fmt.Errorf("event for rewards interval %d not found", currentIndex-1) - } - - // Get the start slot of the current interval - startSlot, err := GetStartSlotForInterval(event, r.bc, r.beaconCfg) - if err != nil { - return fmt.Errorf("error getting start slot for interval %d: %w", currentIndex, err) - } - newEpoch := startSlot / r.beaconCfg.SlotsPerEpoch - - // Create a new record for the start slot - r.log.Printlnf("%s Current record is for interval %d which has passed, creating a new record for interval %d starting on slot %d (epoch %d).", r.logPrefix, r.Record.RewardsInterval, state.NetworkDetails.RewardIndex, startSlot, newEpoch) - r.Record = NewRollingRecord(r.log, r.logPrefix, r.bc, startSlot, &r.beaconCfg, state.NetworkDetails.RewardIndex) - r.startSlot = startSlot - recordCheckpointInterval := r.cfg.Smartnode.RecordCheckpointInterval.Value.(uint64) - r.nextEpochToSave = startSlot/r.beaconCfg.SlotsPerEpoch + recordCheckpointInterval - 1 - - return nil -} diff --git a/shared/services/rewards/rolling-record.go b/shared/services/rewards/rolling-record.go deleted file mode 100644 index 544d2aebc..000000000 --- a/shared/services/rewards/rolling-record.go +++ /dev/null @@ -1,398 +0,0 @@ -package rewards - -import ( - "fmt" - "math/big" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/goccy/go-json" - "github.com/rocket-pool/rocketpool-go/types" - "github.com/rocket-pool/rocketpool-go/utils/eth" - "github.com/rocket-pool/smartnode/shared" - "github.com/rocket-pool/smartnode/shared/services/beacon" - "github.com/rocket-pool/smartnode/shared/services/state" - "github.com/rocket-pool/smartnode/shared/utils/log" - "golang.org/x/sync/errgroup" -) - -const ( - threadLimit int = 12 -) - -type RollingRecord struct { - StartSlot uint64 `json:"startSlot"` - LastDutiesSlot uint64 `json:"lastDutiesSlot"` - ValidatorIndexMap map[string]*MinipoolInfo `json:"validatorIndexMap"` - RewardsInterval uint64 `json:"rewardsInterval"` - SmartnodeVersion string `json:"smartnodeVersion,omitempty"` - - // Private fields - bc beacon.Client `json:"-"` - beaconConfig *beacon.Eth2Config `json:"-"` - genesisTime time.Time `json:"-"` - log *log.ColorLogger `json:"-"` - 
logPrefix string `json:"-"` - intervalDutiesInfo *IntervalDutiesInfo `json:"-"` - - // Constants for convenience - one *big.Int `json:"-"` - validatorReq *big.Int `json:"-"` -} - -// Create a new rolling record wrapper -func NewRollingRecord(log *log.ColorLogger, logPrefix string, bc beacon.Client, startSlot uint64, beaconConfig *beacon.Eth2Config, rewardsInterval uint64) *RollingRecord { - return &RollingRecord{ - StartSlot: startSlot, - LastDutiesSlot: 0, - ValidatorIndexMap: map[string]*MinipoolInfo{}, - RewardsInterval: rewardsInterval, - SmartnodeVersion: shared.RocketPoolVersion, - - bc: bc, - beaconConfig: beaconConfig, - genesisTime: time.Unix(int64(beaconConfig.GenesisTime), 0), - log: log, - logPrefix: logPrefix, - intervalDutiesInfo: &IntervalDutiesInfo{ - Slots: map[uint64]*SlotInfo{}, - }, - - one: eth.EthToWei(1), - validatorReq: eth.EthToWei(32), - } -} - -// Load an existing record from serialized JSON data -func DeserializeRollingRecord(log *log.ColorLogger, logPrefix string, bc beacon.Client, beaconConfig *beacon.Eth2Config, bytes []byte) (*RollingRecord, error) { - record := &RollingRecord{ - bc: bc, - beaconConfig: beaconConfig, - genesisTime: time.Unix(int64(beaconConfig.GenesisTime), 0), - log: log, - logPrefix: logPrefix, - intervalDutiesInfo: &IntervalDutiesInfo{ - Slots: map[uint64]*SlotInfo{}, - }, - - one: eth.EthToWei(1), - validatorReq: eth.EthToWei(32), - } - - err := json.Unmarshal(bytes, &record) - if err != nil { - return nil, fmt.Errorf("error deserializing record: %w", err) - } - - return record, nil -} - -// Update the record to the requested slot, using the provided state as a reference. -// Requires the epoch *after* the requested slot to be finalized so it can accurately count attestations. -func (r *RollingRecord) UpdateToSlot(slot uint64, state *state.NetworkState) error { - - // Get the slot to start processing from - startSlot := r.LastDutiesSlot + 1 - if r.LastDutiesSlot == 0 { - startSlot = r.StartSlot - } - startEpoch := startSlot / r.beaconConfig.SlotsPerEpoch - - // Get the epoch for the state - stateEpoch := slot / r.beaconConfig.SlotsPerEpoch - - //r.log.Printlnf("%s Updating rolling record from slot %d (epoch %d) to %d (epoch %d).", r.logPrefix, startSlot, startEpoch, slot, stateEpoch) - //start := time.Now() - - // Update the validator indices and flag any cheating nodes - r.updateValidatorIndices(state) - - // Process every epoch from the start to the current one - for epoch := startEpoch; epoch <= stateEpoch; epoch++ { - - // Retrieve the duties for the epoch - this won't get duties higher than the given state - err := r.getDutiesForEpoch(epoch, slot, state) - if err != nil { - return fmt.Errorf("error getting duties for epoch %d: %w", epoch, err) - } - - // Process the epoch's attestation submissions - err = r.processAttestationsInEpoch(epoch, state) - if err != nil { - return fmt.Errorf("error processing attestations in epoch %d: %w", epoch, err) - } - - } - - // Process the epoch after the last one to check for late attestations / attestations of the last slot - err := r.processAttestationsInEpoch(stateEpoch+1, state) - if err != nil { - return fmt.Errorf("error processing attestations in epoch %d: %w", stateEpoch+1, err) - } - - // Clear the duties cache since it's not required anymore - r.intervalDutiesInfo = &IntervalDutiesInfo{ - Slots: map[uint64]*SlotInfo{}, - } - - return nil -} - -// Get the minipool scores, along with the cumulative total score and count - ignores minipools that belonged to cheaters -func (r *RollingRecord) 
GetScores(cheatingNodes map[common.Address]bool) ([]*MinipoolInfo, *big.Int, uint64) { - // Create a slice of minipools with legal (non-cheater) scores - minipoolInfos := make([]*MinipoolInfo, 0, len(r.ValidatorIndexMap)) - - // TODO: return a new slice of minipool infos that ignores all cheaters - totalScore := big.NewInt(0) - totalCount := uint64(0) - for _, mpInfo := range r.ValidatorIndexMap { - - // Ignore nodes that cheated - if cheatingNodes[mpInfo.NodeAddress] { - continue - } - - totalScore.Add(totalScore, &mpInfo.AttestationScore.Int) - totalCount += uint64(mpInfo.AttestationCount) - minipoolInfos = append(minipoolInfos, mpInfo) - } - - return minipoolInfos, totalScore, totalCount -} - -// Serialize the current record into a byte array -func (r *RollingRecord) Serialize() ([]byte, error) { - // Clone the record - clone := &RollingRecord{ - StartSlot: r.StartSlot, - LastDutiesSlot: r.LastDutiesSlot, - RewardsInterval: r.RewardsInterval, - SmartnodeVersion: r.SmartnodeVersion, - ValidatorIndexMap: map[string]*MinipoolInfo{}, - } - - // Remove minipool perf records with zero attestations from the serialization - for pubkey, mp := range r.ValidatorIndexMap { - if mp.AttestationCount > 0 || len(mp.MissingAttestationSlots) > 0 { - clone.ValidatorIndexMap[pubkey] = mp - } - } - - // Serialize as JSON - bytes, err := json.Marshal(clone) - if err != nil { - return nil, fmt.Errorf("error serializing rolling record: %w", err) - } - - return bytes, nil -} - -// Update the validator index map with any new validators on Beacon -func (r *RollingRecord) updateValidatorIndices(state *state.NetworkState) { - // NOTE: this has to go through every index each time in order to handle out-of-order validators - // or invalid validators that got created on the testnet with broken deposits - for i := 0; i < len(state.MinipoolDetails); i++ { - mpd := state.MinipoolDetails[i] - pubkey := mpd.Pubkey - - validator, exists := state.ValidatorDetails[pubkey] - if !exists { - // Hit a validator that doesn't exist on Beacon yet - continue - } - - _, exists = r.ValidatorIndexMap[validator.Index] - if !exists && mpd.Status == types.Staking { - // Validator exists and is staking but it hasn't been recorded yet, add it to the map and update the latest index so we don't remap stuff we've already seen - minipoolInfo := &MinipoolInfo{ - Address: mpd.MinipoolAddress, - ValidatorPubkey: mpd.Pubkey, - ValidatorIndex: validator.Index, - NodeAddress: mpd.NodeAddress, - MissingAttestationSlots: map[uint64]bool{}, - AttestationScore: NewQuotedBigInt(0), - } - r.ValidatorIndexMap[validator.Index] = minipoolInfo - } - } -} - -// Get the attestation duties for the given epoch, up to (and including) the provided end slot -func (r *RollingRecord) getDutiesForEpoch(epoch uint64, endSlot uint64, state *state.NetworkState) error { - - lastSlotInEpoch := (epoch+1)*r.beaconConfig.SlotsPerEpoch - 1 - - if r.LastDutiesSlot >= lastSlotInEpoch { - // Already collected the duties for this epoch - r.log.Printlnf("%s All duties were already collected for epoch %d, skipping...", r.logPrefix, epoch) - return nil - } - - // Get the attestation committees for the epoch - committees, err := r.bc.GetCommitteesForEpoch(&epoch) - if err != nil { - return fmt.Errorf("error getting committees for epoch %d: %w", epoch, err) - } - defer committees.Release() - - // Crawl the committees - for idx := 0; idx < committees.Count(); idx++ { - slotIndex := committees.Slot(idx) - if slotIndex < r.StartSlot || slotIndex > endSlot { - // Ignore slots that are out of 
bounds - continue - } - if slotIndex <= r.LastDutiesSlot { - // Ignore slots that have already been processed - continue - } - blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*slotIndex)) - committeeIndex := committees.Index(idx) - - // Check if there are any RP validators in this committee - rpValidators := map[int]*MinipoolInfo{} - for position, validator := range committees.Validators(idx) { - mpInfo, exists := r.ValidatorIndexMap[validator] - if !exists { - // This isn't an RP validator, so ignore it - continue - } - - // Check if this minipool was opted into the SP for this block - nodeDetails := state.NodeDetailsByAddress[mpInfo.NodeAddress] - isOptedIn := nodeDetails.SmoothingPoolRegistrationState - spRegistrationTime := time.Unix(nodeDetails.SmoothingPoolRegistrationChanged.Int64(), 0) - if (isOptedIn && blockTime.Sub(spRegistrationTime) < 0) || // If this block occurred before the node opted in, ignore it - (!isOptedIn && spRegistrationTime.Sub(blockTime) < 0) { // If this block occurred after the node opted out, ignore it - continue - } - - // Check if this minipool was in the `staking` state during this time - mpd := state.MinipoolDetailsByAddress[mpInfo.Address] - statusChangeTime := time.Unix(mpd.StatusTime.Int64(), 0) - if mpd.Status != types.Staking || blockTime.Sub(statusChangeTime) < 0 { - continue - } - - // This was a legal RP validator opted into the SP during this slot so add it - rpValidators[position] = mpInfo - mpInfo.MissingAttestationSlots[slotIndex] = true - } - - // If there are some RP validators, add this committee to the map - if len(rpValidators) > 0 { - slotInfo, exists := r.intervalDutiesInfo.Slots[slotIndex] - if !exists { - slotInfo = &SlotInfo{ - Index: slotIndex, - Committees: map[uint64]*CommitteeInfo{}, - } - r.intervalDutiesInfo.Slots[slotIndex] = slotInfo - } - slotInfo.Committees[committeeIndex] = &CommitteeInfo{ - Index: committeeIndex, - Positions: rpValidators, - } - } - } - - // Set the last slot duties were collected for - the minimum of the last slot in the epoch and the target state slot - r.LastDutiesSlot = lastSlotInEpoch - if endSlot < lastSlotInEpoch { - r.LastDutiesSlot = endSlot - } - return nil - -} - -// Process the attestations proposed within the given epoch against the existing record, using -// the provided state for EL <-> CL mapping -func (r *RollingRecord) processAttestationsInEpoch(epoch uint64, state *state.NetworkState) error { - - slotsPerEpoch := r.beaconConfig.SlotsPerEpoch - var wg errgroup.Group - wg.SetLimit(threadLimit) - attestationsPerSlot := make([][]beacon.AttestationInfo, r.beaconConfig.SlotsPerEpoch) - - // Get the attestation records for this epoch - for i := uint64(0); i < slotsPerEpoch; i++ { - i := i - slot := epoch*slotsPerEpoch + i - wg.Go(func() error { - attestations, found, err := r.bc.GetAttestations(fmt.Sprint(slot)) - if err != nil { - return fmt.Errorf("error getting attestations for slot %d: %w", slot, err) - } - if found { - attestationsPerSlot[i] = attestations - } else { - attestationsPerSlot[i] = []beacon.AttestationInfo{} - } - - return nil - }) - } - - err := wg.Wait() - if err != nil { - return fmt.Errorf("error getting attestation records for epoch %d: %w", epoch, err) - } - - // Process all of the slots in the epoch - for i, attestations := range attestationsPerSlot { - if len(attestations) > 0 { - // Process these attestations - slot := epoch*slotsPerEpoch + uint64(i) - r.processAttestationsInSlot(slot, attestations, state) - } - } - - return nil 
- -} - -// Process all of the attestations for a given slot -func (r *RollingRecord) processAttestationsInSlot(inclusionSlot uint64, attestations []beacon.AttestationInfo, state *state.NetworkState) { - - // Go through the attestations for the block - for _, attestation := range attestations { - - // Get the RP committees for this attestation's slot and index - slotInfo, exists := r.intervalDutiesInfo.Slots[attestation.SlotIndex] - if exists && inclusionSlot-attestation.SlotIndex <= r.beaconConfig.SlotsPerEpoch { // Ignore attestations delayed by more than 32 slots - rpCommittee, exists := slotInfo.Committees[attestation.CommitteeIndex] - if exists { - blockTime := r.genesisTime.Add(time.Second * time.Duration(r.beaconConfig.SecondsPerSlot*attestation.SlotIndex)) - - // Check if each RP validator attested successfully - for position, validator := range rpCommittee.Positions { - if attestation.AggregationBits.BitAt(uint64(position)) { - // This was seen, so remove it from the missing attestations - delete(rpCommittee.Positions, position) - if len(rpCommittee.Positions) == 0 { - delete(slotInfo.Committees, attestation.CommitteeIndex) - } - if len(slotInfo.Committees) == 0 { - delete(r.intervalDutiesInfo.Slots, attestation.SlotIndex) - } - delete(validator.MissingAttestationSlots, attestation.SlotIndex) - - // Get the pseudoscore for this attestation - details := state.MinipoolDetailsByAddress[validator.Address] - bond, fee := getMinipoolBondAndNodeFee(details, blockTime) - minipoolScore := big.NewInt(0).Sub(r.one, fee) // 1 - fee - minipoolScore.Mul(minipoolScore, bond) // Multiply by bond - minipoolScore.Div(minipoolScore, r.validatorReq) // Divide by 32 to get the bond as a fraction of a total validator - minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) - - // Add it to the minipool's score - validator.AttestationScore.Add(&validator.AttestationScore.Int, minipoolScore) - validator.AttestationCount++ - } - } - } - } - } - -} diff --git a/shared/services/rewards/ssz_types/big/uint256.go b/shared/services/rewards/ssz_types/big/uint256.go new file mode 100644 index 000000000..0c05e82a7 --- /dev/null +++ b/shared/services/rewards/ssz_types/big/uint256.go @@ -0,0 +1,105 @@ +package big + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + + ssz "github.com/ferranbt/fastssz" + "github.com/holiman/uint256" +) + +var Overflow = errors.New("uint256 overflow") +var Negative = errors.New("uint256 can't be negative before serializing") + +// Wraps big.Int but will be checked for sign/overflow when serializing SSZ +type Uint256 struct { + *big.Int +} + +func NewUint256(i int64) Uint256 { + return Uint256{big.NewInt(i)} +} + +func (u *Uint256) SizeSSZ() (size int) { + return 32 +} + +func (u *Uint256) ToUint256() (*uint256.Int, error) { + // Check sign + if u.Sign() < 0 { + return nil, Negative + } + + s, overflow := uint256.FromBig(u.Int) + if overflow { + return nil, Overflow + } + return s, nil +} + +func (u *Uint256) MarshalSSZTo(buf []byte) ([]byte, error) { + s, err := u.ToUint256() + if err != nil { + return nil, err + } + + bytes, err := s.MarshalSSZ() + if err != nil { + return nil, err + } + return append(buf, bytes...), nil +} + +func (u *Uint256) HashTreeRootWith(hh ssz.HashWalker) (err error) { + bytes := make([]byte, 32) + bytes, err = u.MarshalSSZTo(bytes) + if err != nil { + return + } + + hh.AppendBytes32(bytes) + return +} + +func (u *Uint256) UnmarshalSSZ(buf []byte) error { + repr := uint256.NewInt(0) + err := repr.UnmarshalSSZ(buf) + if err != nil 
{ + return err + } + u.Int = repr.ToBig() + return nil +} + +func (u *Uint256) String() string { + return u.Int.String() +} + +func (u *Uint256) UnmarshalJSON(data []byte) error { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + u.Int = big.NewInt(0) + return u.Int.UnmarshalJSON([]byte(s)) +} + +func (u *Uint256) MarshalJSON() ([]byte, error) { + s, err := u.Int.MarshalJSON() + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("\"%s\"", s)), nil +} + +func (u *Uint256) Bytes32() ([32]byte, error) { + s, err := u.ToUint256() + if err != nil { + return [32]byte{}, err + } + + return s.Bytes32(), nil +} diff --git a/shared/services/rewards/ssz_types/encoding.go b/shared/services/rewards/ssz_types/encoding.go new file mode 100644 index 000000000..d99385074 --- /dev/null +++ b/shared/services/rewards/ssz_types/encoding.go @@ -0,0 +1,681 @@ +// Code generated by fastssz. DO NOT EDIT. +// Hash: c302f5cab9af79d858415e7e5bc2002568baf2333120ecc30517636a1b041db6 +// Version: 0.1.3 +package ssz_types + +import ( + ssz "github.com/ferranbt/fastssz" +) + +// MarshalSSZ ssz marshals the SSZFile_v1 object +func (s *SSZFile_v1) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SSZFile_v1 object to a target array +func (s *SSZFile_v1) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(356) + + // Field (0) 'Magic' + dst = append(dst, s.Magic[:]...) + + // Field (1) 'RewardsFileVersion' + dst = ssz.MarshalUint64(dst, s.RewardsFileVersion) + + // Field (2) 'RulesetVersion' + dst = ssz.MarshalUint64(dst, s.RulesetVersion) + + // Field (3) 'Network' + dst = ssz.MarshalUint64(dst, uint64(s.Network)) + + // Field (4) 'Index' + dst = ssz.MarshalUint64(dst, s.Index) + + // Field (5) 'StartTime' + dst = ssz.MarshalTime(dst, s.StartTime) + + // Field (6) 'EndTime' + dst = ssz.MarshalTime(dst, s.EndTime) + + // Field (7) 'ConsensusStartBlock' + dst = ssz.MarshalUint64(dst, s.ConsensusStartBlock) + + // Field (8) 'ConsensusEndBlock' + dst = ssz.MarshalUint64(dst, s.ConsensusEndBlock) + + // Field (9) 'ExecutionStartBlock' + dst = ssz.MarshalUint64(dst, s.ExecutionStartBlock) + + // Field (10) 'ExecutionEndBlock' + dst = ssz.MarshalUint64(dst, s.ExecutionEndBlock) + + // Field (11) 'IntervalsPassed' + dst = ssz.MarshalUint64(dst, s.IntervalsPassed) + + // Field (12) 'MerkleRoot' + dst = append(dst, s.MerkleRoot[:]...) 
+ + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards) + } + if dst, err = s.TotalRewards.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (14) 'NetworkRewards' + dst = ssz.WriteOffset(dst, offset) + offset += len(s.NetworkRewards) * 104 + + // Offset (15) 'NodeRewards' + dst = ssz.WriteOffset(dst, offset) + offset += len(s.NodeRewards) * 124 + + // Field (14) 'NetworkRewards' + if size := len(s.NetworkRewards); size > 128 { + err = ssz.ErrListTooBigFn("SSZFile_v1.NetworkRewards", size, 128) + return + } + for ii := 0; ii < len(s.NetworkRewards); ii++ { + if dst, err = s.NetworkRewards[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (15) 'NodeRewards' + if size := len(s.NodeRewards); size > 9223372036854775807 { + err = ssz.ErrListTooBigFn("SSZFile_v1.NodeRewards", size, 9223372036854775807) + return + } + for ii := 0; ii < len(s.NodeRewards); ii++ { + if dst, err = s.NodeRewards[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + return +} + +// UnmarshalSSZ ssz unmarshals the SSZFile_v1 object +func (s *SSZFile_v1) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 356 { + return ssz.ErrSize + } + + tail := buf + var o14, o15 uint64 + + // Field (0) 'Magic' + copy(s.Magic[:], buf[0:4]) + + // Field (1) 'RewardsFileVersion' + s.RewardsFileVersion = ssz.UnmarshallUint64(buf[4:12]) + + // Field (2) 'RulesetVersion' + s.RulesetVersion = ssz.UnmarshallUint64(buf[12:20]) + + // Field (3) 'Network' + s.Network = Network(ssz.UnmarshallUint64(buf[20:28])) + + // Field (4) 'Index' + s.Index = ssz.UnmarshallUint64(buf[28:36]) + + // Field (5) 'StartTime' + s.StartTime = ssz.UnmarshalTime(buf[36:44]) + + // Field (6) 'EndTime' + s.EndTime = ssz.UnmarshalTime(buf[44:52]) + + // Field (7) 'ConsensusStartBlock' + s.ConsensusStartBlock = ssz.UnmarshallUint64(buf[52:60]) + + // Field (8) 'ConsensusEndBlock' + s.ConsensusEndBlock = ssz.UnmarshallUint64(buf[60:68]) + + // Field (9) 'ExecutionStartBlock' + s.ExecutionStartBlock = ssz.UnmarshallUint64(buf[68:76]) + + // Field (10) 'ExecutionEndBlock' + s.ExecutionEndBlock = ssz.UnmarshallUint64(buf[76:84]) + + // Field (11) 'IntervalsPassed' + s.IntervalsPassed = ssz.UnmarshallUint64(buf[84:92]) + + // Field (12) 'MerkleRoot' + copy(s.MerkleRoot[:], buf[92:124]) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards) + } + if err = s.TotalRewards.UnmarshalSSZ(buf[124:348]); err != nil { + return err + } + + // Offset (14) 'NetworkRewards' + if o14 = ssz.ReadOffset(buf[348:352]); o14 > size { + return ssz.ErrOffset + } + + if o14 < 356 { + return ssz.ErrInvalidVariableOffset + } + + // Offset (15) 'NodeRewards' + if o15 = ssz.ReadOffset(buf[352:356]); o15 > size || o14 > o15 { + return ssz.ErrOffset + } + + // Field (14) 'NetworkRewards' + { + buf = tail[o14:o15] + num, err := ssz.DivideInt2(len(buf), 104, 128) + if err != nil { + return err + } + s.NetworkRewards = make([]*NetworkReward, num) + for ii := 0; ii < num; ii++ { + if s.NetworkRewards[ii] == nil { + s.NetworkRewards[ii] = new(NetworkReward) + } + if err = s.NetworkRewards[ii].UnmarshalSSZ(buf[ii*104 : (ii+1)*104]); err != nil { + return err + } + } + } + + // Field (15) 'NodeRewards' + { + buf = tail[o15:] + num, err := ssz.DivideInt2(len(buf), 124, 9223372036854775807) + if err != nil { + return err + } + s.NodeRewards = make([]*NodeReward, num) + for ii := 0; ii < num; ii++ { + if s.NodeRewards[ii] == nil { + s.NodeRewards[ii] = 
new(NodeReward) + } + if err = s.NodeRewards[ii].UnmarshalSSZ(buf[ii*124 : (ii+1)*124]); err != nil { + return err + } + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SSZFile_v1 object +func (s *SSZFile_v1) SizeSSZ() (size int) { + size = 356 + + // Field (14) 'NetworkRewards' + size += len(s.NetworkRewards) * 104 + + // Field (15) 'NodeRewards' + size += len(s.NodeRewards) * 124 + + return +} + +// HashTreeRoot ssz hashes the SSZFile_v1 object +func (s *SSZFile_v1) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SSZFile_v1 object with a hasher +func (s *SSZFile_v1) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Magic' + hh.PutBytes(s.Magic[:]) + + // Field (1) 'RewardsFileVersion' + hh.PutUint64(s.RewardsFileVersion) + + // Field (2) 'RulesetVersion' + hh.PutUint64(s.RulesetVersion) + + // Field (3) 'Network' + hh.PutUint64(uint64(s.Network)) + + // Field (4) 'Index' + hh.PutUint64(s.Index) + + // Field (5) 'StartTime' + hh.PutUint64(uint64(s.StartTime.Unix())) + + // Field (6) 'EndTime' + hh.PutUint64(uint64(s.EndTime.Unix())) + + // Field (7) 'ConsensusStartBlock' + hh.PutUint64(s.ConsensusStartBlock) + + // Field (8) 'ConsensusEndBlock' + hh.PutUint64(s.ConsensusEndBlock) + + // Field (9) 'ExecutionStartBlock' + hh.PutUint64(s.ExecutionStartBlock) + + // Field (10) 'ExecutionEndBlock' + hh.PutUint64(s.ExecutionEndBlock) + + // Field (11) 'IntervalsPassed' + hh.PutUint64(s.IntervalsPassed) + + // Field (12) 'MerkleRoot' + hh.PutBytes(s.MerkleRoot[:]) + + // Field (13) 'TotalRewards' + if s.TotalRewards == nil { + s.TotalRewards = new(TotalRewards) + } + if err = s.TotalRewards.HashTreeRootWith(hh); err != nil { + return + } + + // Field (14) 'NetworkRewards' + { + subIndx := hh.Index() + num := uint64(len(s.NetworkRewards)) + if num > 128 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range s.NetworkRewards { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 128) + } + + // Field (15) 'NodeRewards' + { + subIndx := hh.Index() + num := uint64(len(s.NodeRewards)) + if num > 9223372036854775807 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range s.NodeRewards { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 9223372036854775807) + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the SSZFile_v1 object +func (s *SSZFile_v1) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(s) +} + +// MarshalSSZ ssz marshals the TotalRewards object +func (t *TotalRewards) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(t) +} + +// MarshalSSZTo ssz marshals the TotalRewards object to a target array +func (t *TotalRewards) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'ProtocolDaoRpl' + if dst, err = t.ProtocolDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (1) 'TotalCollateralRpl' + if dst, err = t.TotalCollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'TotalOracleDaoRpl' + if dst, err = t.TotalOracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'TotalSmoothingPoolEth' + if dst, err = t.TotalSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if dst, err = t.PoolStakerSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field 
(5) 'NodeOperatorSmoothingPoolEth' + if dst, err = t.NodeOperatorSmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + // Field (6) 'TotalNodeWeight' + if dst, err = t.TotalNodeWeight.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the TotalRewards object +func (t *TotalRewards) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 224 { + return ssz.ErrSize + } + + // Field (0) 'ProtocolDaoRpl' + if err = t.ProtocolDaoRpl.UnmarshalSSZ(buf[0:32]); err != nil { + return err + } + + // Field (1) 'TotalCollateralRpl' + if err = t.TotalCollateralRpl.UnmarshalSSZ(buf[32:64]); err != nil { + return err + } + + // Field (2) 'TotalOracleDaoRpl' + if err = t.TotalOracleDaoRpl.UnmarshalSSZ(buf[64:96]); err != nil { + return err + } + + // Field (3) 'TotalSmoothingPoolEth' + if err = t.TotalSmoothingPoolEth.UnmarshalSSZ(buf[96:128]); err != nil { + return err + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if err = t.PoolStakerSmoothingPoolEth.UnmarshalSSZ(buf[128:160]); err != nil { + return err + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if err = t.NodeOperatorSmoothingPoolEth.UnmarshalSSZ(buf[160:192]); err != nil { + return err + } + + // Field (6) 'TotalNodeWeight' + if err = t.TotalNodeWeight.UnmarshalSSZ(buf[192:224]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the TotalRewards object +func (t *TotalRewards) SizeSSZ() (size int) { + size = 224 + return +} + +// HashTreeRoot ssz hashes the TotalRewards object +func (t *TotalRewards) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(t) +} + +// HashTreeRootWith ssz hashes the TotalRewards object with a hasher +func (t *TotalRewards) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'ProtocolDaoRpl' + if err = t.ProtocolDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'TotalCollateralRpl' + if err = t.TotalCollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'TotalOracleDaoRpl' + if err = t.TotalOracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'TotalSmoothingPoolEth' + if err = t.TotalSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'PoolStakerSmoothingPoolEth' + if err = t.PoolStakerSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (5) 'NodeOperatorSmoothingPoolEth' + if err = t.NodeOperatorSmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + // Field (6) 'TotalNodeWeight' + if err = t.TotalNodeWeight.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the TotalRewards object +func (t *TotalRewards) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(t) +} + +// MarshalSSZ ssz marshals the NetworkReward object +func (n *NetworkReward) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(n) +} + +// MarshalSSZTo ssz marshals the NetworkReward object to a target array +func (n *NetworkReward) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Network' + dst = ssz.MarshalUint64(dst, uint64(n.Network)) + + // Field (1) 'CollateralRpl' + if dst, err = n.CollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'OracleDaoRpl' + if dst, err = n.OracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'SmoothingPoolEth' + if dst, err = 
n.SmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the NetworkReward object +func (n *NetworkReward) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 104 { + return ssz.ErrSize + } + + // Field (0) 'Network' + n.Network = Layer(ssz.UnmarshallUint64(buf[0:8])) + + // Field (1) 'CollateralRpl' + if err = n.CollateralRpl.UnmarshalSSZ(buf[8:40]); err != nil { + return err + } + + // Field (2) 'OracleDaoRpl' + if err = n.OracleDaoRpl.UnmarshalSSZ(buf[40:72]); err != nil { + return err + } + + // Field (3) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.UnmarshalSSZ(buf[72:104]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the NetworkReward object +func (n *NetworkReward) SizeSSZ() (size int) { + size = 104 + return +} + +// HashTreeRoot ssz hashes the NetworkReward object +func (n *NetworkReward) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(n) +} + +// HashTreeRootWith ssz hashes the NetworkReward object with a hasher +func (n *NetworkReward) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Network' + hh.PutUint64(uint64(n.Network)) + + // Field (1) 'CollateralRpl' + if err = n.CollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'OracleDaoRpl' + if err = n.OracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the NetworkReward object +func (n *NetworkReward) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(n) +} + +// MarshalSSZ ssz marshals the NodeReward object +func (n *NodeReward) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(n) +} + +// MarshalSSZTo ssz marshals the NodeReward object to a target array +func (n *NodeReward) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + + // Field (0) 'Address' + dst = append(dst, n.Address[:]...) 
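+ // (layout note, hand-added: NodeReward is a fixed 124-byte ssz container - a 20-byte address, an 8-byte network id, then three 32-byte uint256 amounts; the offsets in UnmarshalSSZ below follow this layout)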
+ + // Field (1) 'Network' + dst = ssz.MarshalUint64(dst, uint64(n.Network)) + + // Field (2) 'CollateralRpl' + if dst, err = n.CollateralRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (3) 'OracleDaoRpl' + if dst, err = n.OracleDaoRpl.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'SmoothingPoolEth' + if dst, err = n.SmoothingPoolEth.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the NodeReward object +func (n *NodeReward) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size != 124 { + return ssz.ErrSize + } + + // Field (0) 'Address' + copy(n.Address[:], buf[0:20]) + + // Field (1) 'Network' + n.Network = Layer(ssz.UnmarshallUint64(buf[20:28])) + + // Field (2) 'CollateralRpl' + if err = n.CollateralRpl.UnmarshalSSZ(buf[28:60]); err != nil { + return err + } + + // Field (3) 'OracleDaoRpl' + if err = n.OracleDaoRpl.UnmarshalSSZ(buf[60:92]); err != nil { + return err + } + + // Field (4) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.UnmarshalSSZ(buf[92:124]); err != nil { + return err + } + + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the NodeReward object +func (n *NodeReward) SizeSSZ() (size int) { + size = 124 + return +} + +// HashTreeRoot ssz hashes the NodeReward object +func (n *NodeReward) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(n) +} + +// HashTreeRootWith ssz hashes the NodeReward object with a hasher +func (n *NodeReward) HashTreeRootWith(hh ssz.HashWalker) (err error) { + indx := hh.Index() + + // Field (0) 'Address' + hh.PutBytes(n.Address[:]) + + // Field (1) 'Network' + hh.PutUint64(uint64(n.Network)) + + // Field (2) 'CollateralRpl' + if err = n.CollateralRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (3) 'OracleDaoRpl' + if err = n.OracleDaoRpl.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'SmoothingPoolEth' + if err = n.SmoothingPoolEth.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// GetTree ssz hashes the NodeReward object +func (n *NodeReward) GetTree() (*ssz.Node, error) { + return ssz.ProofTree(n) +} diff --git a/shared/services/rewards/ssz_types/gen.sh b/shared/services/rewards/ssz_types/gen.sh new file mode 100755 index 000000000..0e575bea7 --- /dev/null +++ b/shared/services/rewards/ssz_types/gen.sh @@ -0,0 +1,3 @@ +#!/bin/bash +rm -fr encoding.go +sszgen --path . -objs SSZFile_v1 -output encoding.go -include big/ diff --git a/shared/services/rewards/ssz_types/json.go b/shared/services/rewards/ssz_types/json.go new file mode 100644 index 000000000..150004cda --- /dev/null +++ b/shared/services/rewards/ssz_types/json.go @@ -0,0 +1,214 @@ +package ssz_types + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var networkMap = map[string]Network{ + "mainnet": 1, + "holesky": 17000, +} + +// internal use only +type sszfile_v1_alias SSZFile_v1 + +// This custom unmarshaler avoids creating a landmine where the user +// may forget to call NewSSZFile_v1 before unmarshaling into the result, +// which would cause the Magic header to be unset. 
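+// It also finishes by calling Verify, so a successful unmarshal implies the rewards lists are sorted and duplicate-free and the merkle root is consistent. A minimal usage sketch (data stands in for any JSON-encoded tree): +// +//	f := new(SSZFile_v1) // Magic not yet set +//	if err := json.Unmarshal(data, f); err == nil { +//		// f.Magic is now populated and f has passed Verify() +//	}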
+func (f *SSZFile_v1) UnmarshalJSON(data []byte) error { + // Disposable type without a custom unmarshal + var alias sszfile_v1_alias + err := json.Unmarshal(data, &alias) + if err != nil { + return err + } + *f = SSZFile_v1(alias) + + // After unmarshaling, set the magic header + f.Magic = Magic + + // Verify legitimacy of the file + return f.Verify() +} + +// When writing JSON, we need to compute the merkle tree to populate the proofs +func (f *SSZFile_v1) MarshalJSON() ([]byte, error) { + if err := f.Verify(); err != nil { + return nil, fmt.Errorf("error verifying ssz while serializing json: %w", err) + } + proofs, err := f.Proofs() + if err != nil { + return nil, fmt.Errorf("error getting proofs: %w", err) + } + + for _, nr := range f.NodeRewards { + proof, ok := proofs[nr.Address] + if !ok { + return nil, fmt.Errorf("error getting proof for node %s", nr.Address) + } + nr.MerkleProof = proof + } + + var alias sszfile_v1_alias + alias = sszfile_v1_alias(*f) + return json.Marshal(&alias) +} + +func (h *Hash) UnmarshalJSON(data []byte) error { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + s = strings.TrimPrefix(s, "0x") + out, err := hex.DecodeString(s) + if err != nil { + return err + } + + if len(out) != 32 { + return fmt.Errorf("merkle root %s wrong size- must be 32 bytes", s) + } + + copy((*[32]byte)(h)[:], out) + return nil +} + +func (h Hash) MarshalJSON() ([]byte, error) { + return []byte(`"` + h.String() + `"`), nil +} + +func NetworkFromString(s string) (Network, bool) { + n, ok := networkMap[s] + return n, ok +} + +func (n *Network) UnmarshalJSON(data []byte) error { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + id, ok := NetworkFromString(s) + if ok { + *n = Network(id) + return nil + } + + // If the network string doesn't match known values, try to treat it as an integer + u, err := strconv.ParseUint(s, 10, 64) + if err == nil { + *n = Network(u) + return nil + } + + // If the network string isn't an integer, use UINT64_MAX + *n = Network(math.MaxUint64) + return nil +} + +func (n Network) MarshalJSON() ([]byte, error) { + id := n + for k, v := range networkMap { + if v == id { + return json.Marshal(k) + } + } + + // If the network id isn't in the map, serialize it as a string + return json.Marshal(strconv.FormatUint(uint64(id), 10)) +} + +func (n *NetworkRewards) UnmarshalJSON(data []byte) error { + // Network Rewards is a slice, but represented as a map in the json. + var m map[string]json.RawMessage + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + + *n = make(NetworkRewards, 0, len(m)) + for k, v := range m { + networkId, err := strconv.ParseUint(k, 10, 64) + if err != nil { + return err + } + networkReward := new(NetworkReward) + networkReward.Network = networkId + + err = json.Unmarshal(v, networkReward) + if err != nil { + return err + } + *n = append(*n, networkReward) + } + + sort.Sort(*n) + return nil +} + +func (n NetworkRewards) MarshalJSON() ([]byte, error) { + // Network Rewards is a slice, but represented as a map in the json. 
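+ // (for example: {"0": {...}, "10": {...}} - the keys are decimal chain ids)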
+ m := make(map[string]*NetworkReward, len(n)) + // Make sure we sort, first + sort.Sort(n) + for _, nr := range n { + m[strconv.FormatUint(nr.Network, 10)] = nr + } + + // Serialize the map + return json.Marshal(m) +} + +func (n *NodeRewards) UnmarshalJSON(data []byte) error { + var m map[string]json.RawMessage + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + + *n = make(NodeRewards, 0, len(m)) + for k, v := range m { + s := strings.TrimPrefix(k, "0x") + addr, err := hex.DecodeString(s) + if err != nil { + return err + } + + if len(addr) != 20 { + return fmt.Errorf("address %s wrong size- must be 20 bytes", s) + } + + nodeReward := new(NodeReward) + copy(nodeReward.Address[:], addr) + err = json.Unmarshal(v, nodeReward) + if err != nil { + return err + } + *n = append(*n, nodeReward) + } + + sort.Sort(*n) + return nil +} + +func (n NodeRewards) MarshalJSON() ([]byte, error) { + // Node Rewards is a slice, but represented as a map in the json. + m := make(map[string]*NodeReward, len(n)) + // Make sure we sort, first + sort.Sort(n) + for _, nr := range n { + m[nr.Address.String()] = nr + } + + // Serialize the map + return json.Marshal(m) +} diff --git a/shared/services/rewards/ssz_types/rewards-file-v4.go b/shared/services/rewards/ssz_types/rewards-file-v4.go new file mode 100644 index 000000000..2f4e7c63c --- /dev/null +++ b/shared/services/rewards/ssz_types/rewards-file-v4.go @@ -0,0 +1,591 @@ +package ssz_types + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + stdbig "math/big" + "slices" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" + "github.com/wealdtech/go-merkletree" + "github.com/wealdtech/go-merkletree/keccak256" +) + +type Format = uint + +const ( + FormatJSON = iota + FormatSSZ +) + +var Magic [4]byte = [4]byte{0x52, 0x50, 0x52, 0x54} + +type Address [20]byte +type Hash [32]byte +type NetworkRewards []*NetworkReward +type NodeRewards []*NodeReward + +// Network corresponds to the top-level Network field, where 1 means mainnet +type Network uint64 + +// Layer corresponds to rewards-level Network fields, where 0 means layer 1... +// Using an alias of uint64 helps serve as documentation rather than function +type Layer = uint64 + +type MerkleProof []Hash + +type SSZFile_v1 struct { + // Fields specific to ssz encoding are first + + // A magic header. Four bytes. Helps immediately verify what follows is a rewards tree. + // 0x52505254 - it's RPRT in ASCII and easy to recognize + Magic [4]byte `ssz-size:"4" json:"-"` + // Version is first- parsers can check the first 12 bytes of the file to make sure they're + // parsing a rewards tree and it is a version they know how to parse. 
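+ // In the ssz encoding those 12 bytes are the 4-byte Magic ("RPRT") followed by this version as a little-endian uint64.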
+ RewardsFileVersion uint64 `json:"rewardsFileVersion"` + + // Next, we need fields for the rest of the RewardsFileHeader + + // RulesetVersion is the version of the ruleset used to generate the tree, e.g., v9 for the first + // ruleset to use ssz + RulesetVersion uint64 `json:"rulesetVersion"` + // Network is the chain id for which the tree is generated + Network Network `json:"network"` + // Index is the rewards interval index + Index uint64 `json:"index"` + // StartTime is the time of the first slot of the interval + StartTime time.Time `json:"startTime"` + // EndTime is the time of the last slot of the interval + EndTime time.Time `json:"endTime"` + // ConsensusStartBlock is the first non-empty slot of the interval + ConsensusStartBlock uint64 `json:"consensusStartBlock,omitempty"` + // ConsensusEndBlock is the last non-empty slot of the interval + ConsensusEndBlock uint64 `json:"consensusEndBlock"` + // ExecutionStartBlock is the execution block number included in ConsensusStartBlock + ExecutionStartBlock uint64 `json:"executionStartBlock,omitempty"` + // ExecutionEndBlock is the execution block number included in ConsensusEndBlock + ExecutionEndBlock uint64 `json:"executionEndBlock"` + // IntervalsPassed is the number of rewards intervals contained in this tree + IntervalsPassed uint64 `json:"intervalsPassed"` + // MerkleRoot is the root of the merkle tree of all the nodes in this tree. + MerkleRoot Hash `ssz-size:"32" json:"merkleRoot,omitempty"` + // TotalRewards is aggregate data on the rewards this tree contains + TotalRewards *TotalRewards `json:"totalRewards"` + // NetworkRewards holds the destinations and aggregate amounts for each network + // this tree distributes to. + // Must be sorted by Chain ID ascending + NetworkRewards NetworkRewards `ssz-max:"128" json:"networkRewards"` + + // Finally, the actual per-node objects that get merkle-ized + + // NodeRewards are the objects that make up the merkle tree. + // Must be sorted by Node Address ascending + NodeRewards NodeRewards `ssz-max:"9223372036854775807" json:"nodeRewards"` + + merkleProofs map[Address]MerkleProof `ssz:"-" json:"-"` +} + +func NewSSZFile_v1() *SSZFile_v1 { + return &SSZFile_v1{ + Magic: Magic, + } +} + +// Check if the NodeRewards field respects unique constraints +func (f *SSZFile_v1) nodeRewardsUnique() bool { + m := make(map[Address]any, len(f.NodeRewards)) + + for _, nr := range f.NodeRewards { + _, found := m[nr.Address] + if found { + return false + } + m[nr.Address] = struct{}{} + } + + return true +} + +// Check if the NetworkRewards field respects unique constraints +func (f *SSZFile_v1) networkRewardsUnique() bool { + m := make(map[uint64]any, len(f.NetworkRewards)) + + for _, nr := range f.NetworkRewards { + _, found := m[nr.Network] + if found { + return false + } + m[nr.Network] = struct{}{} + } + + return true +} + +// Verify checks that the arrays in the file are appropriately sorted and that +// the merkle proof, if present, matches.
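+// Both UnmarshalJSON and ParseSSZFile call Verify, so files from either codec +// get the same checks: ascending sort order, unique node addresses and chain +// ids, a non-nil TotalRewards, and proofs consistent with the merkle root.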
+func (f *SSZFile_v1) Verify() error { + if !sort.IsSorted(f.NodeRewards) { + return errors.New("ssz file node rewards out of order") + } + + if !sort.IsSorted(f.NetworkRewards) { + return errors.New("ssz file network rewards out of order") + } + + if !f.nodeRewardsUnique() { + return errors.New("ssz file has duplicate entries in its NodeRewards field") + } + + if !f.networkRewardsUnique() { + return errors.New("ssz file has duplicate entries in its NetworkRewards field") + } + + if f.TotalRewards == nil { + return errors.New("missing required field TotalRewards") + } + + if _, err := f.Proofs(); err != nil { + return err + } + + return nil +} + +// Minipool Performance CID is deprecated, but we must implement this for the interface +func (f *SSZFile_v1) SetMinipoolPerformanceFileCID(cid string) { +} + +// The "normal" serialize() call is expected to be JSON by ISerializable in files.go +func (f *SSZFile_v1) Serialize() ([]byte, error) { + return json.Marshal(f) +} + +// Write as SSZ +func (f *SSZFile_v1) SerializeSSZ() ([]byte, error) { + return f.FinalizeSSZ() +} + +func (f *SSZFile_v1) GenerateMerkleTree() error { + _, err := f.Proofs() + return err +} + +// Marshal wrapper that adds the magic header if absent and sets or validates the merkle root +func (f *SSZFile_v1) FinalizeSSZ() ([]byte, error) { + return f.FinalizeSSZTo(make([]byte, 0, f.SizeSSZ())) +} + +func (f *SSZFile_v1) FinalizeSSZTo(buf []byte) ([]byte, error) { + copy(f.Magic[:], Magic[:]) + if err := f.Verify(); err != nil { + return nil, err + } + + return f.MarshalSSZTo(buf) +} + +// Parsing wrapper that adds verification to the merkle root and magic header +func ParseSSZFile(buf []byte) (*SSZFile_v1, error) { + if !bytes.HasPrefix(buf, Magic[:]) { + return nil, errors.New("magic header not found in reward ssz file") + } + + f := &SSZFile_v1{} + if err := f.UnmarshalSSZ(buf); err != nil { + return nil, err + } + + if err := f.Verify(); err != nil { + return nil, err + } + + return f, nil +} + +// This getter lazy-computes the proofs and caches them on the file +func (f *SSZFile_v1) Proofs() (map[Address]MerkleProof, error) { + if f.merkleProofs != nil { + return f.merkleProofs, nil + } + + sort.Sort(f.NodeRewards) + sort.Sort(f.NetworkRewards) + + nodeDataMap := make(map[Address][]byte, len(f.NodeRewards)) + treeData := make([][]byte, 0, len(f.NodeRewards)) + for _, nr := range f.NodeRewards { + // 20 bytes for address, 32 each for network/rpl/eth + address := nr.Address + network := uint256.NewInt(nr.Network).Bytes32() + rpl := stdbig.NewInt(0) + rpl.Add(rpl, nr.CollateralRpl.Int) + rpl.Add(rpl, nr.OracleDaoRpl.Int) + rplBytes := make([]byte, 32) + rplBytes = rpl.FillBytes(rplBytes) + eth, err := nr.SmoothingPoolEth.Bytes32() + if err != nil { + return nil, fmt.Errorf("error converting big.Int to uint256 byte slice: %w", err) + } + + const dataSize = 20 + 32*3 + nodeData := make([]byte, dataSize) + copy(nodeData[0:20], address[:]) + copy(nodeData[20:20+32], network[:]) + copy(nodeData[20+32:20+32*2], rplBytes[:]) + copy(nodeData[20+32*2:20+32*3], eth[:]) + + treeData = append(treeData, nodeData) + nodeDataMap[nr.Address] = nodeData + } + + tree, err := merkletree.NewUsing(treeData, keccak256.New(), false, true) + if err != nil { + return nil, fmt.Errorf("error generating Merkle Tree: %w", err) + } + + // Generate the proofs + out := make(map[Address]MerkleProof) + f.merkleProofs = out + for address, nodeData := range nodeDataMap { + proof, err := tree.GenerateProof(nodeData, 0) + if err != nil { + return nil,
fmt.Errorf("error generating proof for node 0x%s: %w", hex.EncodeToString(address[:]), err) + } + + // Store the proof in the result map + out[address] = make([]Hash, len(proof.Hashes)) + for i, hash := range proof.Hashes { + out[address][i] = Hash{} + copy(out[address][i][:], hash) + } + } + + // Populate missing proofs at node level + for _, nr := range f.NodeRewards { + if nr.MerkleProof == nil { + nr.MerkleProof = out[nr.Address] + } + } + + // Finally, set the root. If it's already set, and differs, return an error. + root := Hash{} + copy(root[:], tree.Root()) + if bytes.Count(f.MerkleRoot[:], []byte{0x00}) >= 32 { + f.MerkleRoot = root + return out, nil + } + + if !bytes.Equal(f.MerkleRoot[:], root[:]) { + return nil, fmt.Errorf("generated root %s mismatch against existing root %s", root, f.MerkleRoot) + } + + // The existing root matches the calculated root + return out, nil +} + +type TotalRewards struct { + // Total amount of RPL sent to the pDAO + ProtocolDaoRpl big.Uint256 `ssz-size:"32" json:"protocolDaoRpl"` + // Total amount of RPL sent to Node Operators + TotalCollateralRpl big.Uint256 `ssz-size:"32" json:"totalCollateralRpl"` + // Total amount of RPL sent to the oDAO + TotalOracleDaoRpl big.Uint256 `ssz-size:"32" json:"totalOracleDaoRpl"` + // Total amount of ETH in the Smoothing Pool + TotalSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"totalSmoothingPoolEth"` + // Total amount of Eth sent to the rETH contract + PoolStakerSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"poolStakerSmoothingPoolEth"` + // Total amount of Eth sent to Node Operators in the Smoothing Pool + NodeOperatorSmoothingPoolEth big.Uint256 `ssz-size:"32" json:"nodeOperatorSmoothingPoolEth"` + // Total Node Weight as defined by RPIP-30 + TotalNodeWeight big.Uint256 `ssz-size:"32" json:"totalNodeWeight,omitempty"` +} + +type NetworkReward struct { + // Chain ID (key) + Network Layer `json:"-"` + + // Amount of RPL sent to the network for Node Operators + CollateralRpl big.Uint256 `ssz-size:"32" json:"collateralRpl"` + // Amount of RPL sent to the network for oDAO members + OracleDaoRpl big.Uint256 `ssz-size:"32" json:"oracleDaoRpl"` + // Amount of Eth sent to the network for Node Operators + SmoothingPoolEth big.Uint256 `ssz-size:"32" json:"smoothingPoolEth"` +} + +func NewNetworkReward(network Layer) *NetworkReward { + return &NetworkReward{ + Network: network, + CollateralRpl: big.NewUint256(0), + OracleDaoRpl: big.NewUint256(0), + SmoothingPoolEth: big.NewUint256(0), + } +} + +// NetworkRewards should implement sort.Interface to make it easier to sort. 
+func (n NetworkRewards) Len() int { + return len(n) +} + +func (n NetworkRewards) Less(i, j int) bool { + return n[i].Network < n[j].Network +} + +func (n NetworkRewards) Swap(i, j int) { + tmp := n[i] + n[i] = n[j] + n[j] = tmp +} + +type NodeReward struct { + // Address of the Node (key) + Address Address `ssz-size:"20" json:"-"` + + // Chain ID on which the Node will claim + Network Layer `json:"rewardNetwork"` + // Amount of staking RPL earned by the Node + CollateralRpl big.Uint256 `ssz-size:"32" json:"collateralRpl"` + // Amount of oDAO RPL earned by the Node + OracleDaoRpl big.Uint256 `ssz-size:"32" json:"oracleDaoRpl"` + // Amount of Smoothing Pool ETH earned by the Node + SmoothingPoolEth big.Uint256 `ssz-size:"32" json:"smoothingPoolEth"` + // Merkle proof for the node claim, sorted with the Merkle root last + MerkleProof MerkleProof `ssz:"-" json:"merkleProof"` +} + +func NewNodeReward(network Layer, address Address) *NodeReward { + return &NodeReward{ + Address: address, + Network: network, + CollateralRpl: big.NewUint256(0), + OracleDaoRpl: big.NewUint256(0), + SmoothingPoolEth: big.NewUint256(0), + } +} + +// NodeRewards should implement sort.Interface to make it easier to sort. +func (n NodeRewards) Len() int { + return len(n) +} + +func (n NodeRewards) Less(i, j int) bool { + ia := n[i].Address + ja := n[j].Address + + if bytes.Compare(ia[:], ja[:]) < 0 { + return true + } + + return false +} + +func (n NodeRewards) Swap(i, j int) { + tmp := n[i] + n[i] = n[j] + n[j] = tmp +} + +func (n NodeRewards) Find(addr Address) *NodeReward { + idx := slices.IndexFunc(n, func(nr *NodeReward) bool { + return bytes.Equal(nr.Address[:], addr[:]) + }) + if idx == -1 { + return nil + } + return n[idx] +} + +func AddressFromBytes(b []byte) Address { + out := Address{} + copy(out[:], b) + return out +} + +// Functions to implement IRewardsFile +func (f *SSZFile_v1) Deserialize(data []byte) error { + if bytes.HasPrefix(data, Magic[:]) { + if err := f.UnmarshalSSZ(data); err != nil { + return err + } + + return f.Verify() + } + + return json.Unmarshal(data, f) +} + +func (f *SSZFile_v1) GetIndex() uint64 { + return f.Index +} + +func (f *SSZFile_v1) GetMerkleRoot() string { + return f.MerkleRoot.String() +} + +func (f *SSZFile_v1) GetNodeAddresses() []common.Address { + out := make([]common.Address, 0, len(f.NodeRewards)) + + for _, nr := range f.NodeRewards { + out = append(out, common.BytesToAddress(nr.Address[:])) + } + return out +} + +func (f *SSZFile_v1) GetConsensusStartBlock() uint64 { + return f.ConsensusStartBlock +} + +func (f *SSZFile_v1) GetExecutionStartBlock() uint64 { + return f.ExecutionStartBlock +} + +func (f *SSZFile_v1) GetConsensusEndBlock() uint64 { + return f.ConsensusEndBlock +} + +func (f *SSZFile_v1) GetExecutionEndBlock() uint64 { + return f.ExecutionEndBlock +} + +func (f *SSZFile_v1) GetStartTime() time.Time { + return f.StartTime +} + +func (f *SSZFile_v1) GetEndTime() time.Time { + return f.EndTime +} + +func (f *SSZFile_v1) GetIntervalsPassed() uint64 { + return f.IntervalsPassed +} + +func (f *SSZFile_v1) GetMerkleProof(address common.Address) ([]common.Hash, error) { + proofs, err := f.Proofs() + if err != nil { + return nil, fmt.Errorf("error while calculating proof for %s: %w", address.String(), err) + } + + var nativeAddress Address + copy(nativeAddress[:], address[:]) + nativeProofs := proofs[nativeAddress] + out := make([]common.Hash, 0, len(nativeProofs)) + for _, p := range nativeProofs { + var h common.Hash + copy(h[:], p[:]) + out = append(out, h) + 
} + + return out, nil +} + +func (f *SSZFile_v1) getRewardsForNetwork(network uint64) *NetworkReward { + for _, nr := range f.NetworkRewards { + if nr.Network == network { + return nr + } + } + return nil +} + +func (f *SSZFile_v1) HasRewardsForNetwork(network uint64) bool { + return f.getRewardsForNetwork(network) != nil +} + +func (f *SSZFile_v1) GetNetworkCollateralRpl(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.CollateralRpl.Int +} + +func (f *SSZFile_v1) GetNetworkOracleDaoRpl(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.OracleDaoRpl.Int +} + +func (f *SSZFile_v1) GetNetworkSmoothingPoolEth(network uint64) *stdbig.Int { + nr := f.getRewardsForNetwork(network) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.SmoothingPoolEth.Int +} + +func (f *SSZFile_v1) getNodeRewards(addr common.Address) *NodeReward { + var nativeAddress Address + copy(nativeAddress[:], addr[:]) + return f.NodeRewards.Find(nativeAddress) +} + +func (f *SSZFile_v1) HasRewardsFor(addr common.Address) bool { + return f.getNodeRewards(addr) != nil +} + +func (f *SSZFile_v1) GetNodeCollateralRpl(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.CollateralRpl.Int +} + +func (f *SSZFile_v1) GetNodeOracleDaoRpl(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.OracleDaoRpl.Int +} + +func (f *SSZFile_v1) GetNodeSmoothingPoolEth(addr common.Address) *stdbig.Int { + nr := f.getNodeRewards(addr) + if nr == nil { + return stdbig.NewInt(0) + } + + return nr.SmoothingPoolEth.Int +} + +func (f *SSZFile_v1) GetRewardsFileVersion() uint64 { + return f.RewardsFileVersion +} + +func (f *SSZFile_v1) GetTotalCollateralRpl() *stdbig.Int { + return f.TotalRewards.TotalCollateralRpl.Int +} + +func (f *SSZFile_v1) GetTotalNodeOperatorSmoothingPoolEth() *stdbig.Int { + return f.TotalRewards.NodeOperatorSmoothingPoolEth.Int +} + +func (f *SSZFile_v1) GetTotalNodeWeight() *stdbig.Int { + return f.TotalRewards.TotalNodeWeight.Int +} + +func (f *SSZFile_v1) GetTotalOracleDaoRpl() *stdbig.Int { + return f.TotalRewards.TotalOracleDaoRpl.Int +} + +func (f *SSZFile_v1) GetTotalPoolStakerSmoothingPoolEth() *stdbig.Int { + return f.TotalRewards.PoolStakerSmoothingPoolEth.Int +} + +func (f *SSZFile_v1) GetTotalProtocolDaoRpl() *stdbig.Int { + return f.TotalRewards.ProtocolDaoRpl.Int +} diff --git a/shared/services/rewards/ssz_types/ssz_test.go b/shared/services/rewards/ssz_types/ssz_test.go new file mode 100644 index 000000000..1a3aee1a7 --- /dev/null +++ b/shared/services/rewards/ssz_types/ssz_test.go @@ -0,0 +1,284 @@ +package ssz_types + +import ( + "bytes" + "encoding/hex" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/rocket-pool/smartnode/shared/services/rewards/ssz_types/big" +) + +func sampleFile() *SSZFile_v1 { + out := NewSSZFile_v1() + + out.RewardsFileVersion = 10 + out.RulesetVersion = 4 + out.Network = 17000 + out.Index = 11 + out.StartTime = time.Now().Add(time.Hour * -24) + out.EndTime = time.Now() + out.ConsensusStartBlock = 128 + out.ConsensusEndBlock = 256 + out.ExecutionStartBlock = 1024 + out.ExecutionEndBlock = 1280 + out.IntervalsPassed = 1 + _, _ = hex.Decode(out.MerkleRoot[:], []byte("ac9ddbc55a8cd92612b86866de955f0bb99dd51e1447767afc610b13a5063546")) + out.TotalRewards = 
&TotalRewards{ + ProtocolDaoRpl: big.NewUint256(1000), + TotalCollateralRpl: big.NewUint256(2000), + TotalOracleDaoRpl: big.NewUint256(3000), + TotalSmoothingPoolEth: big.NewUint256(4000), + PoolStakerSmoothingPoolEth: big.NewUint256(5000), + NodeOperatorSmoothingPoolEth: big.NewUint256(6000), + TotalNodeWeight: big.NewUint256(7000), + } + out.NetworkRewards = NetworkRewards{ + &NetworkReward{ + Network: 0, + CollateralRpl: big.NewUint256(200), + OracleDaoRpl: big.NewUint256(300), + SmoothingPoolEth: big.NewUint256(400), + }, + &NetworkReward{ + Network: 1, + CollateralRpl: big.NewUint256(500), + OracleDaoRpl: big.NewUint256(600), + SmoothingPoolEth: big.NewUint256(700), + }, + } + out.NodeRewards = NodeRewards{ + &NodeReward{ + Address: Address{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, + Network: 0, + CollateralRpl: big.NewUint256(10), + OracleDaoRpl: big.NewUint256(20), + SmoothingPoolEth: big.NewUint256(30), + }, + &NodeReward{ + Address: Address{0x01, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}, + Network: 1, + CollateralRpl: big.NewUint256(10), + OracleDaoRpl: big.NewUint256(20), + SmoothingPoolEth: big.NewUint256(30), + }, + } + + return out +} + +func fatalIf(t *testing.T, err error) { + t.Helper() + if err == nil { + return + } + t.Fatal(err) +} + +func TestSSZFileRoundTrip(t *testing.T) { + f := sampleFile() + hashRoot, err := f.HashTreeRoot() + t.Logf("Original hash root: %x", hashRoot) + fatalIf(t, err) + + data, err := f.FinalizeSSZ() + fatalIf(t, err) + + f, err = ParseSSZFile(data) + fatalIf(t, err) + hashRoot2, err := f.HashTreeRoot() + t.Logf("Rount-trip hash root: %x", hashRoot2) + fatalIf(t, err) + + if !bytes.Equal(hashRoot2[:], hashRoot[:]) { + t.Fatal("Round-trip ssz differed from original ssz") + } +} + +func TestSSZFileJSONRoundTrip(t *testing.T) { + f := sampleFile() + hashRoot, err := f.HashTreeRoot() + t.Logf("Original hash root: %x", hashRoot) + fatalIf(t, err) + + data, err := f.MarshalJSON() + fatalIf(t, err) + + f = &SSZFile_v1{} + fatalIf(t, f.UnmarshalJSON(data)) + + hashRoot2, err := f.HashTreeRoot() + t.Logf("Rount-trip hash root: %x", hashRoot2) + fatalIf(t, err) + + if !bytes.Equal(hashRoot2[:], hashRoot[:]) { + t.Fatal("Round-trip ssz differed from original ssz") + } +} + +func TestSSZFileDuplicateNodeRewards(t *testing.T) { + f := sampleFile() + f.NodeRewards = append(f.NodeRewards, f.NodeRewards[1]) + err := f.Verify() + if err == nil { + t.Fatal("expected error due to duplicate entries") + } + if !strings.Contains(err.Error(), "duplicate entries") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileDuplicateNetworkRewards(t *testing.T) { + f := sampleFile() + f.NetworkRewards = append(f.NetworkRewards, f.NetworkRewards[1]) + err := f.Verify() + if err == nil { + t.Fatal("expected error due to duplicate entries") + } + if !strings.Contains(err.Error(), "duplicate entries") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileOutOfOrderNodeRewards(t *testing.T) { + f := sampleFile() + slices.Reverse(f.NodeRewards) + err := f.Verify() + if err == nil { + t.Fatal("expected error due to sorting") + } + if !strings.Contains(err.Error(), "out of order") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileOutOfOrderNetworkRewards(t *testing.T) { + f := sampleFile() + slices.Reverse(f.NetworkRewards) + err := f.Verify() + if err == nil { + 
t.Fatal("expected error due to sorting") + } + if !strings.Contains(err.Error(), "out of order") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileMissingTotalRewards(t *testing.T) { + f := sampleFile() + f.TotalRewards = nil + err := f.Verify() + if err == nil { + t.Fatal("expected error due to missing field") + } + if !strings.Contains(err.Error(), "missing required field TotalRewards") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileUnknownNetwork(t *testing.T) { + f := sampleFile() + f.Network = 3 + hashRoot, err := f.HashTreeRoot() + t.Logf("Original hash root: %x", hashRoot) + fatalIf(t, err) + + data, err := f.MarshalJSON() + fatalIf(t, err) + + f = &SSZFile_v1{} + fatalIf(t, f.UnmarshalJSON(data)) + + hashRoot2, err := f.HashTreeRoot() + t.Logf("Rount-trip hash root: %x", hashRoot2) + fatalIf(t, err) + + if !bytes.Equal(hashRoot2[:], hashRoot[:]) { + t.Fatal("Round-trip ssz differed from original ssz") + } +} + +func TestSSZFileNoMagic(t *testing.T) { + f := sampleFile() + copy(f.Magic[:], []byte{0x00, 0x01, 0x02, 0x03}) + data, err := f.MarshalSSZ() + fatalIf(t, err) + f, err = ParseSSZFile(data) + if err == nil { + t.Fatal("expected error due to missing magic header") + } + if !strings.Contains(err.Error(), "magic header not found") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileBadRoot(t *testing.T) { + f := sampleFile() + copy(f.MerkleRoot[:], []byte{0x00, 0x01, 0x02, 0x03}) + data, err := f.MarshalSSZ() + fatalIf(t, err) + f, err = ParseSSZFile(data) + if err == nil { + t.Fatal("expected error due to mangled MerkleRoot") + } + if !strings.Contains(err.Error(), "mismatch against existing root") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileCalculateRoot(t *testing.T) { + f := sampleFile() + _, _ = hex.Decode(f.MerkleRoot[:], []byte("0000000000000000000000000000000000000000000000000000000000000000")) + data, err := f.MarshalSSZ() + fatalIf(t, err) + f, err = ParseSSZFile(data) + fatalIf(t, err) + + // Make sure the root is now set + if bytes.Count(f.MerkleRoot[:], []byte{0x00}) >= 32 { + t.Fatal("Expected ParseSSZFile to set the missing root") + } +} + +func TestSSZFileFinalizeFail(t *testing.T) { + f := sampleFile() + copy(f.MerkleRoot[:], []byte{0x00, 0x01, 0x02, 0x03}) + _, err := f.FinalizeSSZ() + if err == nil { + t.Fatal("expected error due to mangled MerkleRoot") + } + if !strings.Contains(err.Error(), "mismatch against existing root") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileTruncatedError(t *testing.T) { + f := sampleFile() + data, err := f.FinalizeSSZ() + data = data[:10] + f, err = ParseSSZFile(data) + if err == nil { + t.Fatal("expected error due to mangled file bytes") + } + if !strings.Contains(err.Error(), "incorrect size") { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestSSZFileSorting(t *testing.T) { + f := sampleFile() + slices.Reverse(f.NetworkRewards) + slices.Reverse(f.NodeRewards) + sort.Sort(f.NetworkRewards) + if !sort.IsSorted(f.NetworkRewards) { + t.Fatal("sorting NetworkRewards failed") + } + sort.Sort(f.NodeRewards) + if !sort.IsSorted(f.NodeRewards) { + t.Fatal("sorting NodeRewards failed") + } + +} diff --git a/shared/services/rewards/ssz_types/string.go b/shared/services/rewards/ssz_types/string.go new file mode 100644 index 000000000..0a164243c --- /dev/null +++ b/shared/services/rewards/ssz_types/string.go @@ -0,0 +1,13 @@ +package ssz_types + +import ( + "encoding/hex" +) + +func 
(h Hash) String() string { + return "0x" + hex.EncodeToString(h[:]) +} + +func (a Address) String() string { + return "0x" + hex.EncodeToString(a[:]) +} diff --git a/shared/services/rewards/test/assets/assets.go b/shared/services/rewards/test/assets/assets.go new file mode 100644 index 000000000..7faf79d73 --- /dev/null +++ b/shared/services/rewards/test/assets/assets.go @@ -0,0 +1,142 @@ +package assets + +import ( + "bytes" + "compress/gzip" + _ "embed" + "encoding/json" + "io" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/smartnode/shared/services/state" +) + +const Mainnet20ELHeaderTime = 1710394571 + +//go:embed rp-rewards-mainnet-20.json.gz +var mainnet20RewardsJSONGZ []byte +var mainnet20RewardsJSON []byte + +func GetMainnet20RewardsJSON() []byte { + if mainnet20RewardsJSON != nil { + return mainnet20RewardsJSON + } + + gz, err := gzip.NewReader(bytes.NewBuffer(mainnet20RewardsJSONGZ)) + if err != nil { + panic(err) + } + defer gz.Close() + mainnet20RewardsJSON, err = io.ReadAll(gz) + if err != nil { + panic(err) + } + return mainnet20RewardsJSON +} + +//go:embed rp-minipool-performance-mainnet-20.json.gz +var Mainnet20MinipoolPerformanceJSONGZ []byte +var Mainnet20MinipoolPerformanceJSON []byte + +func GetMainnet20MinipoolPerformanceJSON() []byte { + if Mainnet20MinipoolPerformanceJSON != nil { + return Mainnet20MinipoolPerformanceJSON + } + + gz, err := gzip.NewReader(bytes.NewBuffer(Mainnet20MinipoolPerformanceJSONGZ)) + if err != nil { + panic(err) + } + defer gz.Close() + Mainnet20MinipoolPerformanceJSON, err = io.ReadAll(gz) + if err != nil { + panic(err) + } + return Mainnet20MinipoolPerformanceJSON +} + +//go:embed rp-network-state-mainnet-20.json.gz +var Mainnet20NetworkStateJSONGZ []byte + +var mainnet20RewardsState *state.NetworkState + +func GetMainnet20RewardsState() *state.NetworkState { + if mainnet20RewardsState != nil { + return mainnet20RewardsState + } + + // GUnzip the embedded bytes + gz, err := gzip.NewReader(bytes.NewBuffer(Mainnet20NetworkStateJSONGZ)) + if err != nil { + panic(err) + } + defer gz.Close() + + // Create a JSON decoder + dec := json.NewDecoder(gz) + + // Decode the JSON + result := state.NetworkState{} + err = dec.Decode(&result) + if err != nil { + panic(err) + } + + // Memoize the result + mainnet20RewardsState = &result + + return mainnet20RewardsState +} + +func GetRewardSnapshotEventInterval19() rewards.RewardsEvent { + var rewardSnapshotEventInterval19 = rewards.RewardsEvent{ + Index: big.NewInt(19), + ExecutionBlock: big.NewInt(19231284), + ConsensusBlock: big.NewInt(8429279), + MerkleRoot: common.HexToHash("0x35d1be64d49aa71dc5b5ea13dd6f91d8613c81aef2593796d6dee599cd228aea"), + MerkleTreeCID: "bafybeiazkzsqe7molppbhbxg2khdgocrip36eoezroa7anbe53za7mxjpq", + IntervalsPassed: big.NewInt(1), + TreasuryRPL: big.NewInt(0), // Set below + TrustedNodeRPL: []*big.Int{}, // XXX Not set, but probably not needed + NodeRPL: []*big.Int{}, // XXX Not set, but probably not needed + NodeETH: []*big.Int{}, // XXX Not set, but probably not needed + UserETH: big.NewInt(0), // XXX Not set, but probably not needed + IntervalStartTime: time.Unix(1705556139, 0), + IntervalEndTime: time.Unix(1707975339, 0), + SubmissionTime: time.Unix(1707976475, 0), + } + rewardSnapshotEventInterval19.TreasuryRPL.SetString("0x0000000000000000000000000000000000000000000000f0a1e7585cd758ffe2", 0) // base 0 so the "0x" prefix is accepted; SetString with base 16 rejects prefixed strings + return rewardSnapshotEventInterval19 +} + +//go:embed
rp-network-critical-duties-mainnet-20.json.gz +var mainnet20CriticalDutiesSlotsGZ []byte +var mainnet20CriticalDutiesSlots *state.CriticalDutiesSlots + +func GetMainnet20CriticalDutiesSlots() *state.CriticalDutiesSlots { + if mainnet20CriticalDutiesSlots != nil { + return mainnet20CriticalDutiesSlots + } + + jsonReader, err := gzip.NewReader(bytes.NewBuffer(mainnet20CriticalDutiesSlotsGZ)) + if err != nil { + panic(err) + } + defer jsonReader.Close() + + // Create a JSON decoder + dec := json.NewDecoder(jsonReader) + + // Decode the JSON + result := state.CriticalDutiesSlots{} + err = dec.Decode(&result) + if err != nil { + panic(err) + } + + mainnet20CriticalDutiesSlots = &result + return mainnet20CriticalDutiesSlots +} diff --git a/shared/services/rewards/test/assets/rp-minipool-performance-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-minipool-performance-mainnet-20.json.gz new file mode 100644 index 000000000..1497ec0cf Binary files /dev/null and b/shared/services/rewards/test/assets/rp-minipool-performance-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/assets/rp-network-critical-duties-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-network-critical-duties-mainnet-20.json.gz new file mode 100644 index 000000000..dbb233c4a Binary files /dev/null and b/shared/services/rewards/test/assets/rp-network-critical-duties-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/assets/rp-network-state-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-network-state-mainnet-20.json.gz new file mode 100644 index 000000000..ce385c4a9 Binary files /dev/null and b/shared/services/rewards/test/assets/rp-network-state-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/assets/rp-rewards-mainnet-20.json.gz b/shared/services/rewards/test/assets/rp-rewards-mainnet-20.json.gz new file mode 100644 index 000000000..16004330a Binary files /dev/null and b/shared/services/rewards/test/assets/rp-rewards-mainnet-20.json.gz differ diff --git a/shared/services/rewards/test/beacon.go b/shared/services/rewards/test/beacon.go new file mode 100644 index 000000000..b30925cdc --- /dev/null +++ b/shared/services/rewards/test/beacon.go @@ -0,0 +1,446 @@ +package test + +import ( + "errors" + "fmt" + "math/big" + "strconv" + "testing" + + "github.com/prysmaticlabs/go-bitfield" + "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/state" +) + +type epoch uint64 +type slot uint64 +type validatorIndex string +type validatorIndexToCommitteeIndexMap map[validatorIndex]uint +type criticalDutiesSlotMap map[validatorIndex]map[slot]interface{} + +func (v *validatorIndexToCommitteeIndexMap) set(vI validatorIndex, i uint) { + if *v == nil { + *v = make(validatorIndexToCommitteeIndexMap) + } + (*v)[vI] = i +} + +type missedDutiesMap map[epoch]map[slot][]validatorIndex + +func (missedDuties *missedDutiesMap) add(s slot, validator validatorIndex) { + if *missedDuties == nil { + *missedDuties = make(missedDutiesMap) + } + e := epoch(s / 32) + _, ok := (*missedDuties)[e] + + if !ok { + (*missedDuties)[e] = make(map[slot][]validatorIndex) + } + _, ok = (*missedDuties)[e][s] + if !ok { + (*missedDuties)[e][s] = make([]validatorIndex, 0) + } + (*missedDuties)[e][s] = append((*missedDuties)[e][s], validator) +} + +func (missedDuties *missedDutiesMap) getCount(s slot) uint { + e := epoch(s / 32) + if _, ok := (*missedDuties)[e]; !ok { + return 0 + } + if _, 
ok := (*missedDuties)[e][s]; !ok { + return 0 + } + return uint(len((*missedDuties)[e][s])) +} + +type missedEpochsMap map[validatorIndex]map[epoch]interface{} + +func (missedEpochs *missedEpochsMap) set(v validatorIndex, s slot) { + e := epoch(s / 32) + if *missedEpochs == nil { + *missedEpochs = make(missedEpochsMap) + } + _, ok := (*missedEpochs)[v] + if !ok { + (*missedEpochs)[v] = make(map[epoch]interface{}) + } + (*missedEpochs)[v][e] = struct{}{} +} + +func (missedEpochs *missedEpochsMap) validatorMissedEpoch(v validatorIndex, e epoch) bool { + if _, ok := (*missedEpochs)[v]; !ok { + return false + } + _, ok := (*missedEpochs)[v][e] + return ok +} + +type MockBeaconClient struct { + state *state.NetworkState + + t *testing.T + blocks map[string]beacon.BeaconBlock + + // A map of epoch -> slot -> validator indices for missed duties + missedDuties missedDutiesMap + + // A map of validator -> epoch -> {} + // that tracks which epochs a validator has missed duties in + missedEpochs missedEpochsMap + + // Count of validators + validatorCount uint + + // A map of validator index -> order in the list + validatorIndices validatorIndexToCommitteeIndexMap + + // A map of validator index to pubkey + validatorPubkeys map[validatorIndex]types.ValidatorPubkey + + // A map of validator index to critical duties slots + criticalDutiesSlots criticalDutiesSlotMap + + // A map of validator index to withdrawals + withdrawals map[slot]map[validatorIndex]*big.Int +} + +func (m *MockBeaconClient) SetState(state *state.NetworkState) { + m.state = state + if m.validatorPubkeys == nil { + m.validatorPubkeys = make(map[validatorIndex]types.ValidatorPubkey) + } + for _, v := range state.ValidatorDetails { + if _, ok := m.validatorPubkeys[validatorIndex(v.Index)]; ok { + m.t.Fatalf("Validator %s already set", v.Index) + } + m.validatorPubkeys[validatorIndex(v.Index)] = v.Pubkey + } +} + +type mockBeaconCommitteeSlot struct { + validators []string +} + +type MockBeaconCommittees struct { + slots []mockBeaconCommitteeSlot + epoch epoch +} + +func NewMockBeaconClient(t *testing.T) *MockBeaconClient { + return &MockBeaconClient{t: t} +} + +func (bc *MockBeaconClient) GetBeaconBlock(s string) (beacon.BeaconBlock, bool, error) { + attestations, _, err := bc.GetAttestations(s) + if err != nil { + return beacon.BeaconBlock{}, false, err + } + sInt, err := strconv.ParseUint(s, 10, 64) + if err != nil { + panic(err) + } + withdrawalMap := bc.withdrawals[slot(sInt)] + var out beacon.BeaconBlock + if block, ok := bc.blocks[s]; ok { + out = block + out.Attestations = attestations + } + + // Withdrawals + out.Withdrawals = make([]beacon.WithdrawalInfo, 0, len(withdrawalMap)) + for validatorIndex, amount := range withdrawalMap { + out.Withdrawals = append(out.Withdrawals, beacon.WithdrawalInfo{ + ValidatorIndex: string(validatorIndex), + Amount: amount, + }) + } + out.Attestations = attestations + + return out, true, nil +} + +func (bc *MockBeaconClient) SetBeaconBlock(slot string, block beacon.BeaconBlock) { + if bc.blocks == nil { + bc.blocks = make(map[string]beacon.BeaconBlock) + } + bc.blocks[slot] = block +} + +func (bc *MockBeaconClient) SetCriticalDutiesSlots(criticalDutiesSlots *state.CriticalDutiesSlots) { + if bc.criticalDutiesSlots == nil { + bc.criticalDutiesSlots = make(criticalDutiesSlotMap) + } + for _validator, slots := range criticalDutiesSlots.CriticalDuties { + validator := validatorIndex(_validator) + if bc.criticalDutiesSlots[validator] == nil { + bc.criticalDutiesSlots[validator] = 
make(map[slot]interface{}) + } + for _, _slot := range slots { + s := slot(_slot) + bc.criticalDutiesSlots[validator][s] = struct{}{} + } + } +} + +func (bc *MockBeaconClient) isValidatorActive(validator validatorIndex, e epoch) (bool, error) { + // Get the pubkey + validatorPubkey, ok := bc.validatorPubkeys[validator] + if !ok { + return false, fmt.Errorf("validator %s not found", validator) + } + validatorDetails, ok := bc.state.ValidatorDetails[validatorPubkey] + if !ok { + return false, fmt.Errorf("validator %s not found", validatorPubkey) + } + // Validators are assigned duties in the epoch they are activated + // but not in the epoch they exit + return validatorDetails.ActivationEpoch <= uint64(e) && (validatorDetails.ExitEpoch == 0 || uint64(e) < validatorDetails.ExitEpoch), nil +} + +func (bc *MockBeaconClient) GetCommitteesForEpoch(_epoch *uint64) (beacon.Committees, error) { + + out := &MockBeaconCommittees{} + out.epoch = epoch(*_epoch) + + // First find validators that must be assigned to specific slots + var missedDutiesValidators map[slot][]validatorIndex + missedDutiesValidators = bc.missedDuties[out.epoch] + + // Keep track of validators that have been assigned to a slot + assignedValidators := make(map[string]interface{}) + + out.slots = make([]mockBeaconCommitteeSlot, 32) + for s := out.epoch * 32; s < out.epoch*32+32; s++ { + idx := s - out.epoch*32 + out.slots[idx].validators = make([]string, 0, len(bc.validatorIndices)/32) + + // Assign validators that missed duties for this slot + for _, validator := range missedDutiesValidators[slot(s)] { + out.slots[idx].validators = append(out.slots[idx].validators, string(validator)) + } + for _, validator := range out.slots[idx].validators { + assignedValidators[validator] = struct{}{} + } + } + + // Assign the remaining validators based on total order / critical duties + for validator, _ := range bc.validatorIndices { + if _, ok := assignedValidators[string(validator)]; ok { + continue + } + + // If the validator was not active, skip it + active, err := bc.isValidatorActive(validator, out.epoch) + if err != nil { + return nil, err + } + if !active { + continue + } + + // If the validator has critical duties for this slot, assign it + if _, ok := bc.criticalDutiesSlots[validator]; ok { + assigned := false + for s, _ := range bc.criticalDutiesSlots[validator] { + if bc.state.BeaconConfig.SlotToEpoch(uint64(s)) == uint64(out.epoch) { + idx := s % 32 + out.slots[idx].validators = append(out.slots[idx].validators, string(validator)) + assigned = true + break + } + } + if assigned { + continue + } + } + + // The validator was not assigned to a slot, neither by missing duties nor critical duties + // Assign it to a pseudorandom slot + idx := validator.Mod32() + out.slots[idx].validators = append(out.slots[idx].validators, string(validator)) + } + + return out, nil +} + +func (v validatorIndex) Mod32() uint { + vInt, err := strconv.ParseUint(string(v), 10, 64) + if err != nil { + panic(err) + } + return uint(vInt % 32) +} + +func (bc *MockBeaconClient) GetAttestations(_slot string) ([]beacon.AttestationInfo, bool, error) { + + slotNative, err := strconv.ParseUint(_slot, 10, 64) + if err != nil { + bc.t.Fatalf("Invalid slot: %s", _slot) + } + s := slot(slotNative) + + // Report attestations for the previous slot + s -= 16 + + // Get the epoch of the previous slot + e := epoch(s / 32) + + // The length of the bitlist is the number of validators that missed duties + // for the slot, plus the number of validators whose mod 32 is the same as 
the slot, + unless that validator has missed duties in the same epoch. + // + // However, a validator can be both in the set of validators that missed duties for the slot + // and the set of validators whose mod 32 is the same as the slot, so we have to be careful + // to not double count them. + slotMod32 := s % 32 + var bitlistLength uint + // Add the number of validators that missed duties for the slot + bitlistLength = bc.missedDuties.getCount(s) + + for index, _ := range bc.validatorIndices { + // Don't count validators that have misses anywhere in this epoch + if bc.missedEpochs.validatorMissedEpoch(index, e) { + // This validator either missed this slot and was already counted, + // or missed a different slot in the same epoch, and shouldn't be counted + continue + } + + active, err := bc.isValidatorActive(index, e) + if err != nil { + bc.t.Fatalf("Error checking if validator %s is active: %v", index, err) + } + if !active { + continue + } + + // Don't count validators with critical duties in this epoch unless the duty is in slot s + if duties, ok := bc.criticalDutiesSlots[index]; ok { + // The validator has some critical duties + if _, ok := duties[s]; ok { + // The duty is in slot s, so count it + bitlistLength++ + } else { + // Check if any duties are in the same epoch + foundDuty := false + for criticalDutySlot, _ := range duties { + if bc.state.BeaconConfig.SlotToEpoch(uint64(criticalDutySlot)) == uint64(e) { + foundDuty = true + break + } + } + if foundDuty { + continue + } + } + } + + // This validator was assigned to this slot and did not miss duties. + validatorIndexMod32 := index.Mod32() + if validatorIndexMod32 == uint(slotMod32) { + bitlistLength++ + } + } + + bl := bitfield.NewBitlist(uint64(bitlistLength)) + // Include all validators + bl = bl.Not() + // Exclude validators that need to miss duties on the previous slot + if _, ok := bc.missedDuties[e]; ok { + if _, ok := bc.missedDuties[e][s]; ok { + numMissed := len(bc.missedDuties[e][s]) + for i := 0; i < numMissed; i++ { + bl.SetBitAt(uint64(i), false) + } + } + } + out := []beacon.AttestationInfo{ + { + AggregationBits: bl, + SlotIndex: uint64(s), + CommitteeIndex: 0, + }, + } + return out, true, nil +} + +// Count returns the number of committees in the response +func (mbc *MockBeaconCommittees) Count() int { + return len(mbc.slots) +} + +// Index returns the index of the committee at the provided offset +func (mbc *MockBeaconCommittees) Index(index int) uint64 { + return 0 +} + +// Slot returns the slot of the committee at the provided offset +func (mbc *MockBeaconCommittees) Slot(index int) uint64 { + return uint64(mbc.epoch)*32 + uint64(index) +} + +// Validators returns the list of validators of the committee at +// the provided offset +func (mbc *MockBeaconCommittees) Validators(index int) []string { + return mbc.slots[index].validators +} + +// Release is a no-op +func (mbc *MockBeaconCommittees) Release() { +} + +// SetMinipoolPerformance notes the minipool's performance +// to be mocked in the response to GetAttestations +func (bc *MockBeaconClient) SetMinipoolPerformance(index string, missedSlots []uint64) { + + // For each missed slot, add it to the inner map of slot to validator indices + for _, s := range missedSlots { + bc.missedDuties.add(slot(s), validatorIndex(index)) + + // Add to missedEpochs + bc.missedEpochs.set(validatorIndex(index), slot(s)) + } + + // A map of true validator index -> committee index + if _, ok := bc.validatorIndices[validatorIndex(index)]; ok { + bc.t.Fatalf("Validator
%s already set", index) + } + bc.validatorIndices.set(validatorIndex(index), bc.validatorCount) + bc.validatorCount++ +} + +func (bc *MockBeaconClient) GetEth2Config() (beacon.Eth2Config, error) { + return bc.state.BeaconConfig, nil +} + +func (bc *MockBeaconClient) GetBeaconHead() (beacon.BeaconHead, error) { + // Tell the tests that the beacon head is far enough ahead that the target slot + // is in an epoch that has a finalized epoch right after it. + out := beacon.BeaconHead{ + Epoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 3, + JustifiedEpoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 2, + PreviousJustifiedEpoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 1, + FinalizedEpoch: bc.state.BeaconConfig.SlotToEpoch(bc.state.BeaconSlotNumber) + 1, + } + return out, nil +} + +func (bc *MockBeaconClient) GetStateForSlot(slot uint64) (*state.NetworkState, error) { + if slot == bc.state.BeaconSlotNumber { + return bc.state, nil + } + return nil, errors.New("not implemented") +} + +func (bc *MockBeaconClient) AddWithdrawal(s uint64, index string, amount *big.Int) { + if bc.withdrawals == nil { + bc.withdrawals = make(map[slot]map[validatorIndex]*big.Int) + } + ss := slot(s) + if bc.withdrawals[ss] == nil { + bc.withdrawals[ss] = make(map[validatorIndex]*big.Int) + } + bc.withdrawals[ss][validatorIndex(index)] = amount +} diff --git a/shared/services/rewards/test/mock.go b/shared/services/rewards/test/mock.go new file mode 100644 index 000000000..02d195cc6 --- /dev/null +++ b/shared/services/rewards/test/mock.go @@ -0,0 +1,740 @@ +package test + +import ( + "math/big" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/common" + rprewards "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/rocketpool-go/utils/eth" + rpstate "github.com/rocket-pool/rocketpool-go/utils/state" + "github.com/rocket-pool/smartnode/shared/services/beacon" + "github.com/rocket-pool/smartnode/shared/services/rewards/fees" + "github.com/rocket-pool/smartnode/shared/services/state" +) + +const FarFutureEpoch uint64 = 0xffffffffffffffff + +// This file contains structs useful for quickly creating mock histories for testing. 
+ +func (h *MockHistory) GetValidatorIndex() string { + u, err := strconv.ParseUint(h.lastValidatorIndex, 10, 64) + if err != nil { + panic(err) + } + h.lastValidatorIndex = strconv.FormatUint(u+1, 10) + return h.lastValidatorIndex +} + +func (h *MockHistory) GetValidatorPubkey() types.ValidatorPubkey { + next := big.NewInt(0).Add(big.NewInt(0).SetBytes(h.lastValidatorPubkey.Bytes()), big.NewInt(1)) + h.lastValidatorPubkey = types.BytesToValidatorPubkey(next.Bytes()) + return h.lastValidatorPubkey +} + +func (h *MockHistory) GetMinipoolAddress() common.Address { + next := big.NewInt(0).Add(big.NewInt(0).SetBytes(h.lastMinipoolAddress.Bytes()), big.NewInt(1)) + h.lastMinipoolAddress = common.BigToAddress(next) + return h.lastMinipoolAddress +} + +func (h *MockHistory) GetNodeAddress() common.Address { + next := big.NewInt(0).Add(big.NewInt(0).SetBytes(h.lastNodeAddress.Bytes()), big.NewInt(1)) + h.lastNodeAddress = common.BigToAddress(next) + return h.lastNodeAddress +} + +var oneEth = big.NewInt(1000000000000000000) +var thirtyTwoEth = big.NewInt(0).Mul(oneEth, big.NewInt(32)) + +func (h *MockHistory) GetMinipoolAttestationScoreAndCount(address common.Address, state *state.NetworkState) (*big.Int, uint64) { + out := big.NewInt(0) + mpi := state.MinipoolDetailsByAddress[address] + nodeDetails := state.NodeDetailsByAddress[mpi.NodeAddress] + + // Check every slot in the history + count := uint64(0) + for slot := h.GetConsensusStartBlock(); slot <= h.GetConsensusEndBlock(); slot++ { + // Get the time at the slot + blockTime := h.BeaconConfig.GetSlotTime(slot) + // Check the status of the minipool at this time + if mpi.Status != types.Staking { + continue + } + if mpi.Finalised { + continue + } + // Check if the minipool was opted in at this time + if !nodeDetails.WasOptedInAt(blockTime) { + continue + } + pubkey := mpi.Pubkey + validator := state.ValidatorDetails[pubkey] + // Check if the validator was exited before this slot + if validator.ExitEpoch <= h.BeaconConfig.SlotToEpoch(slot) { + continue + } + index := validator.Index + indexInt, _ := strconv.ParseUint(index, 10, 64) + // Count the attestation if index%32 == slot%32 + if indexInt%32 == uint64(slot%32) { + count++ + + bond, fee := mpi.GetMinipoolBondAndNodeFee(blockTime) + // Give the minipool a score according to its fee + eligibleBorrowedEth := state.GetEligibleBorrowedEth(nodeDetails) + _, percentOfBorrowedEth := state.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeDetails.RplStake) + fee = fees.GetMinipoolFeeWithBonus(bond, fee, percentOfBorrowedEth) + minipoolScore := big.NewInt(0).Sub(oneEth, fee) // 1 - fee + minipoolScore.Mul(minipoolScore, bond) // Multiply by bond + minipoolScore.Div(minipoolScore, thirtyTwoEth) // Divide by 32 to get the bond as a fraction of a total validator + minipoolScore.Add(minipoolScore, fee) // Total = fee + (bond/32)(1 - fee) + out.Add(out, minipoolScore) + } + } + return out, count +} + +type MockMinipool struct { + Address common.Address + Pubkey types.ValidatorPubkey + Status types.MinipoolStatus + StatusBlock *big.Int + StatusTime time.Time + Finalised bool + NodeFee *big.Int + NodeDepositBalance *big.Int + NodeAddress common.Address + + LastBondReductionTime time.Time + LastBondReductionPrevValue *big.Int + LastBondReductionPrevNodeFee *big.Int + + ValidatorIndex string + + // Withdrawal amount to add to the minipool during its SP period + SPWithdrawals *big.Int + + // Withdrawal amount to add to the minipool during its regular period + OptedOutWithdrawals *big.Int + 
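+ // (See SetWithdrawals below: SPWithdrawals are credited to a slot while the node was opted into the smoothing pool, and OptedOutWithdrawals to a slot while it was opted out.) +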
+ // The epoch after which the minipool is withdrawable. + // Defaults to FAR_FUTURE_EPOCH. + WithdrawableEpoch uint64 + + Notes []string +} + +type BondSize *big.Int + +var ( + BondSizeEightEth = BondSize(eth.EthToWei(8)) + BondSizeSixteenEth = BondSize(eth.EthToWei(16)) + _bondSizeThirtyTwoEth = BondSize(eth.EthToWei(32)) +) + +func (h *MockHistory) GetNewDefaultMockMinipool(bondSize BondSize) *MockMinipool { + if (*big.Int)(_bondSizeThirtyTwoEth).Cmp(bondSize) <= 0 { + panic("Bond size must be less than 32 ether") + } + + out := &MockMinipool{ + Address: h.GetMinipoolAddress(), + Pubkey: h.GetValidatorPubkey(), + // By default, staked since always + Status: types.Staking, + StatusBlock: big.NewInt(0), + StatusTime: time.Unix(DefaultMockHistoryGenesis, 0), + // Default to 10% to make math simpler. Aka 0.1 ether + NodeFee: big.NewInt(100000000000000000), + NodeDepositBalance: big.NewInt(0).Set(bondSize), + ValidatorIndex: h.GetValidatorIndex(), + // Default to 1 ETH of SP withdrawals + SPWithdrawals: big.NewInt(1e18), + } + + return out +} + +type MockNode struct { + Address common.Address + RegistrationTime time.Time + RplStake *big.Int + SmoothingPoolRegistrationState bool + SmoothingPoolRegistrationChanged time.Time + + IsOdao bool + JoinedOdaoAt time.Time + + bondedEth *big.Int + borrowedEth *big.Int + Minipools []*MockMinipool + + Notes string + Class string +} + +func (n *MockNode) AddMinipool(minipool *MockMinipool) { + minipool.NodeAddress = n.Address + n.bondedEth.Add(n.bondedEth, minipool.NodeDepositBalance) + borrowedEth := big.NewInt(0).Sub((*big.Int)(_bondSizeThirtyTwoEth), minipool.NodeDepositBalance) + n.borrowedEth.Add(n.borrowedEth, borrowedEth) + + n.Minipools = append(n.Minipools, minipool) +} + +func (h *MockHistory) SetWithdrawals(mockBeaconClient *MockBeaconClient) { + for _, node := range h.Nodes { + var slotWhileIn uint64 + // Get a slot inside the node's SP period + if node.SmoothingPoolRegistrationState { + // Use the last slot of the SP period + slotWhileIn = h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch) + } else { + // Get the opt-out time and ensure the node was opted in for at least 1 slot + optedOut := node.SmoothingPoolRegistrationChanged + if optedOut.Unix() != 0 { + slotWhileIn = h.BeaconConfig.FirstSlotAtLeast(optedOut.Unix()) - 1 + if slotWhileIn < h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch) { + slotWhileIn = 0 + } + } + } + + var slotWhileOut uint64 + if !node.SmoothingPoolRegistrationState { + slotWhileOut = h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch) + } else { + // Get the opt-in time and ensure the node was opted out for at least 1 slot + optedIn := node.SmoothingPoolRegistrationChanged + if optedIn.Unix() != 0 { + slotWhileOut = h.BeaconConfig.FirstSlotAtLeast(optedIn.Unix()) - 1 + if slotWhileOut < h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch) { + slotWhileOut = 0 + } + } + } + + for _, minipool := range node.Minipools { + if minipool.SPWithdrawals != nil && minipool.SPWithdrawals.Sign() > 0 { + if slotWhileIn == 0 { + panic("minipool has sp withdrawals but node was never in the sp") + } + mockBeaconClient.AddWithdrawal(slotWhileIn, minipool.ValidatorIndex, minipool.SPWithdrawals) + } + if minipool.OptedOutWithdrawals != nil && minipool.OptedOutWithdrawals.Sign() > 0 { + if slotWhileOut == 0 { + panic("minipool has opted out withdrawals but node was never opted out of the sp") + } + mockBeaconClient.AddWithdrawal(slotWhileOut, minipool.ValidatorIndex, minipool.OptedOutWithdrawals) + } + } + } +} + +type NewMockNodeParams struct { + SmoothingPool 
bool + EightEthMinipools int + SixteenEthMinipools int + CollateralRpl int64 +} + +func (h *MockHistory) GetNewDefaultMockNode(params *NewMockNodeParams) *MockNode { + if params == nil { + // Inefficient, but nice code follows. + params = &NewMockNodeParams{} + } + + out := &MockNode{ + Address: h.GetNodeAddress(), + RegistrationTime: time.Unix(DefaultMockHistoryGenesis, 0), + RplStake: big.NewInt(0), + SmoothingPoolRegistrationState: params.SmoothingPool, + SmoothingPoolRegistrationChanged: time.Unix(0, 0), + + borrowedEth: big.NewInt(0), + bondedEth: big.NewInt(0), + } + + for i := 0; i < params.EightEthMinipools; i++ { + out.AddMinipool(h.GetNewDefaultMockMinipool(BondSizeEightEth)) + } + + for i := 0; i < params.SixteenEthMinipools; i++ { + out.AddMinipool(h.GetNewDefaultMockMinipool(BondSizeSixteenEth)) + } + + out.RplStake = big.NewInt(params.CollateralRpl) + out.RplStake.Mul(out.RplStake, eth.EthToWei(1)) + + // Opt nodes in an epoch before the start of the interval + if params.SmoothingPool { + out.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch - 1)) + } + + return out +} + +// Returns a list of nodes with various attributes: +// some will have active minipools, some will not; +// some will be under- or over-collateralized; +// some will have opted in or out during the interval; +// some will have had a bond reduction during the interval +func (h *MockHistory) GetDefaultMockNodes() []*MockNode { + nodes := []*MockNode{} + + // Create 10 nodes with one 8-eth minipool each and 10 RPL staked + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + CollateralRpl: 10, + }) + node.Notes = "Regular node with one regular 8-eth minipool" + node.Class = "single_eight_eth" + node.Minipools[0].SPWithdrawals = nil + node.Minipools[0].OptedOutWithdrawals = big.NewInt(1e18) + nodes = append(nodes, node) + } + + // Create 10 more of the same, but in the SP + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 10, + }) + node.Notes = "Smoothing pool node with one regular 8-eth minipool" + node.Class = "single_eight_eth_sp" + nodes = append(nodes, node) + } + + // Create 20 as above, but with 16-eth minipools + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + CollateralRpl: 10, + }) + node.Notes = "Regular node with one regular 16-eth minipool" + node.Class = "single_sixteen_eth" + node.Minipools[0].SPWithdrawals = nil + node.Minipools[0].OptedOutWithdrawals = big.NewInt(1e18) + nodes = append(nodes, node) + } + + for i := 0; i < 10; i++ { + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 10, + }) + node.Notes = "Smoothing pool node with one regular 16-eth minipool" + node.Class = "single_sixteen_eth_sp" + nodes = append(nodes, node) + } + + // Add a node that opts in a quarter of the way through the interval + node := h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + (h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 8-eth that opts in 1/4 of the way through the interval" + node.Class = "single_eight_eth_opted_in_quarter" + nodes = append(nodes, node) + + // Add a node that opts
in a quarter of the way through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + (h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 16-eth that opts in 1/4 of the way through the interval" + node.Class = "single_sixteen_eth_opted_in_quarter" + nodes = append(nodes, node) + + // Add a node that opts out three quarters of the way through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: false, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + 3*(h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 8-eth that opts out 3/4 of the way through the interval" + node.Class = "single_eight_eth_opted_out_three_quarters" + nodes = append(nodes, node) + + // Add a node that opts out three quarters of the way through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + SixteenEthMinipools: 1, + SmoothingPool: false, + CollateralRpl: 20, + }) + node.SmoothingPoolRegistrationChanged = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + 3*(h.EndEpoch-h.StartEpoch)/4)) + node.Notes = "Smoothing pool node with one 16-eth that opts out 3/4 of the way through the interval" + node.Class = "single_sixteen_eth_opted_out_three_quarters" + nodes = append(nodes, node) + + // Add a node that does a bond reduction halfway through the interval + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + SmoothingPool: true, + CollateralRpl: 10, + }) + node.Minipools[0].LastBondReductionTime = h.BeaconConfig.GetSlotTime(h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch + (h.EndEpoch-h.StartEpoch)/2)) + node.Minipools[0].LastBondReductionPrevValue = big.NewInt(0).Mul(big.NewInt(16), eth.EthToWei(1)) + // Say it was 20% for fun + node.Minipools[0].LastBondReductionPrevNodeFee, _ = big.NewInt(0).SetString("200000000000000000", 10) + node.Notes = "Node with one 16-eth that does a bond reduction to 8 eth halfway through the interval" + node.Class = "single_bond_reduction" + nodes = append(nodes, node) + + // Add a node with no minipools + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + // Give it collateral so we can test that it's ignored despite having collateral + CollateralRpl: 10, + }) + node.Notes = "Node with no minipools but RPL collateral" + node.Class = "no_minipools" + nodes = append(nodes, node) + + // Add a node with a pending minipool + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + CollateralRpl: 10, + }) + node.Minipools[0].Status = types.Prelaunch + node.Minipools[0].SPWithdrawals = nil + node.Notes = "Node with one 8-eth minipool that is pending" + node.Class = "single_eight_eth_pending" + nodes = append(nodes, node) + + // Add a node with a single staking minipool that is finalized + node = h.GetNewDefaultMockNode(&NewMockNodeParams{ + EightEthMinipools: 1, + CollateralRpl: 10, + }) + node.Minipools[0].Finalised = true + node.Minipools[0].SPWithdrawals = nil + node.Notes = "Node with one 8-eth minipool that is finalized" + node.Class = "single_eight_eth_finalized" + nodes = append(nodes, node) + + // Finally, create two odao nodes to share the juicy odao rewards + odaoNodes := h.GetDefaultMockODAONodes() + nodes =
append(nodes, odaoNodes...) + + return nodes +} + +func (h *MockHistory) GetDefaultMockODAONodes() []*MockNode { + odaoNodes := []*MockNode{ + h.GetNewDefaultMockNode(nil), + h.GetNewDefaultMockNode(nil), + } + for _, node := range odaoNodes { + node.IsOdao = true + node.Class = "odao" + } + return odaoNodes +} + +const DefaultMockHistoryGenesis = 1577836800 + +type MockHistory struct { + StartEpoch uint64 + EndEpoch uint64 + BlockOffset uint64 + BeaconConfig beacon.Eth2Config + + // Network details for the final slot + NetworkDetails *rpstate.NetworkDetails + + Nodes []*MockNode + + // Various offsets to create unique number spaces for each key type + lastNodeAddress common.Address + lastMinipoolAddress common.Address + lastValidatorPubkey types.ValidatorPubkey + lastValidatorIndex string +} + +func NewDefaultMockHistoryNoNodes() *MockHistory { + out := &MockHistory{ + StartEpoch: 100, + EndEpoch: 200, + BlockOffset: 100000, + BeaconConfig: beacon.Eth2Config{ + GenesisEpoch: 0, + // 2020-01-01 midnight UTC for simplicity + GenesisTime: DefaultMockHistoryGenesis, + SlotsPerEpoch: 32, + SecondsPerSlot: 12, + SecondsPerEpoch: 12 * 32, + }, + + NetworkDetails: &rpstate.NetworkDetails{ + // Defaults to 0.24 ether, so 10 RPL is 2.4 ether and a leb8 with 10 RPL is 10% collateralized + RplPrice: big.NewInt(240000000000000000), + // Defaults to 10% aka 0.1 ether + MinCollateralFraction: big.NewInt(100000000000000000), + // Defaults to 60% to mimic current withdrawal limits + MaxCollateralFraction: big.NewInt(600000000000000000), + // Defaults to 100 epochs + IntervalDuration: 100 * 32 * 12 * time.Second, + // Defaults to genesis plus 100 epochs + IntervalStart: time.Unix(DefaultMockHistoryGenesis, 0).Add(100 * 32 * 12 * time.Second), + // Defaults to 0.7 ether to match mainnet + NodeOperatorRewardsPercent: big.NewInt(700000000000000000), + // Defaults to 0.015 ether to match mainnet as of 2024-10-08 + TrustedNodeOperatorRewardsPercent: big.NewInt(15000000000000000), + // Defaults to 1 - 0.7 - 0.015 ether to round out to 100% + ProtocolDaoRewardsPercent: big.NewInt(285000000000000000), + // Defaults to 70,000 ether of RPL to approximate 1/13th of 5% of 18m + PendingRPLRewards: big.NewInt(0).Mul(big.NewInt(70000), big.NewInt(1000000000000000000)), + // RewardIndex defaults to 40000 to keep a test tree from being taken seriously + RewardIndex: 40000, + // Put 100 ether in the smoothing pool + SmoothingPoolBalance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(1000000000000000000)), + + // The rest of the fields seem unimportant and are left empty + }, + lastNodeAddress: common.BigToAddress(big.NewInt(2000)), + lastMinipoolAddress: common.BigToAddress(big.NewInt(30000)), + lastValidatorPubkey: types.BytesToValidatorPubkey(big.NewInt(600000).Bytes()), + lastValidatorIndex: "0", + } + return out +} + +func NewDefaultMockHistory() *MockHistory { + out := NewDefaultMockHistoryNoNodes() + out.Nodes = out.GetDefaultMockNodes() + return out +} + +func (h *MockHistory) GetEndNetworkState() *state.NetworkState { + out := &state.NetworkState{ + // EL block number is the final slot's block, which is the last slot of the last epoch + // plus the offset + ElBlockNumber: h.BlockOffset + h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch), + BeaconSlotNumber: h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch), + BeaconConfig: h.BeaconConfig, + NetworkDetails: h.NetworkDetails, + NodeDetails: []rpstate.NativeNodeDetails{}, + NodeDetailsByAddress: make(map[common.Address]*rpstate.NativeNodeDetails), + MinipoolDetails:
[]rpstate.NativeMinipoolDetails{}, + MinipoolDetailsByAddress: make(map[common.Address]*rpstate.NativeMinipoolDetails), + MinipoolDetailsByNode: make(map[common.Address][]*rpstate.NativeMinipoolDetails), + ValidatorDetails: make(state.ValidatorDetailsMap), + OracleDaoMemberDetails: []rpstate.OracleDaoMemberDetails{}, + ProtocolDaoProposalDetails: nil, + } + + // Add nodes + for _, node := range h.Nodes { + // Calculate the node's effective RPL stake + // If it's below 10% of borrowed eth per the network details, it's 0 + rplStake := node.RplStake + rplPrice := h.NetworkDetails.RplPrice + // Calculate the minimum RPL stake according to the network details + minRplStake := big.NewInt(0).Mul(node.borrowedEth, h.NetworkDetails.MinCollateralFraction) + // minRplStake is now the minimum stake as an eth value measured in wei squared; + // divide by the price to get the minimum RPL stake in RPL + minRplStake.Div(minRplStake, rplPrice) + + // Same for max + maxRplStake := big.NewInt(0).Mul(node.borrowedEth, h.NetworkDetails.MaxCollateralFraction) + maxRplStake.Div(maxRplStake, rplPrice) + + // Eth matching limit is rpl stake times the price divided by the collateral fraction + ethMatchingLimit := big.NewInt(0).Mul(node.RplStake, rplPrice) + ethMatchingLimit.Div(ethMatchingLimit, h.NetworkDetails.MinCollateralFraction) + collateralisationRatio := big.NewInt(0) + if node.borrowedEth.Sign() > 0 { + // bonded * 1e18 / (bonded + borrowed), as a wei-scaled fraction + collateralisationRatio.Div(big.NewInt(0).Mul(node.bondedEth, eth.EthToWei(1)), big.NewInt(0).Add(node.bondedEth, node.borrowedEth)) + } + + // Create the node details + details := rpstate.NativeNodeDetails{ + Exists: true, + RegistrationTime: big.NewInt(node.RegistrationTime.Unix()), + TimezoneLocation: "UTC", + RewardNetwork: big.NewInt(0), + RplStake: node.RplStake, + EffectiveRPLStake: rplStake, + MinimumRPLStake: minRplStake, + MaximumRPLStake: maxRplStake, + EthMatched: node.borrowedEth, + EthMatchedLimit: ethMatchingLimit, + MinipoolCount: big.NewInt(int64(len(node.Minipools))), + // Empty node wallet + BalanceETH: big.NewInt(0), + BalanceRETH: big.NewInt(0), + BalanceRPL: big.NewInt(0), + BalanceOldRPL: big.NewInt(0), + DepositCreditBalance: big.NewInt(0), + DistributorBalance: big.NewInt(0), + DistributorBalanceUserETH: big.NewInt(0), + DistributorBalanceNodeETH: big.NewInt(0), + WithdrawalAddress: node.Address, + PendingWithdrawalAddress: common.Address{}, + SmoothingPoolRegistrationState: node.SmoothingPoolRegistrationState, + SmoothingPoolRegistrationChanged: big.NewInt(node.SmoothingPoolRegistrationChanged.Unix()), + NodeAddress: node.Address, + + AverageNodeFee: big.NewInt(0), // Populated by CalculateAverageFeeAndDistributorShares + + // Ratio of bonded to bonded plus borrowed + CollateralisationRatio: collateralisationRatio, + } + + out.NodeDetails = append(out.NodeDetails, details) + ptr := &out.NodeDetails[len(out.NodeDetails)-1] + out.NodeDetailsByAddress[node.Address] = ptr + + // Add minipools + for _, minipool := range node.Minipools { + minipoolDetails := rpstate.NativeMinipoolDetails{ + Exists: true, + MinipoolAddress: minipool.Address, + Pubkey: minipool.Pubkey, + StatusRaw: uint8(minipool.Status), + StatusBlock: minipool.StatusBlock, + StatusTime: big.NewInt(minipool.StatusTime.Unix()), + Finalised: minipool.Finalised, + NodeFee: minipool.NodeFee, + NodeDepositBalance: minipool.NodeDepositBalance, + NodeDepositAssigned: true, + UserDepositBalance: big.NewInt(0).Sub(_bondSizeThirtyTwoEth, minipool.NodeDepositBalance), + UserDepositAssigned: true, + UserDepositAssignedTime:
big.NewInt(h.BeaconConfig.GetSlotTime(minipool.StatusBlock.Uint64() - h.BlockOffset).Unix()), + NodeAddress: minipool.NodeAddress, + Balance: big.NewInt(0), + DistributableBalance: big.NewInt(0), + NodeShareOfBalance: big.NewInt(0), + UserShareOfBalance: big.NewInt(0), + NodeRefundBalance: big.NewInt(0), + PenaltyCount: big.NewInt(0), + PenaltyRate: big.NewInt(0), + WithdrawalCredentials: common.Hash{}, + Status: minipool.Status, + DepositType: types.Variable, + + LastBondReductionTime: big.NewInt(minipool.LastBondReductionTime.Unix()), + LastBondReductionPrevValue: minipool.LastBondReductionPrevValue, + LastBondReductionPrevNodeFee: minipool.LastBondReductionPrevNodeFee, + } + out.MinipoolDetails = append(out.MinipoolDetails, minipoolDetails) + minipoolPtr := &out.MinipoolDetails[len(out.MinipoolDetails)-1] + out.MinipoolDetailsByAddress[minipool.Address] = minipoolPtr + out.MinipoolDetailsByNode[minipool.NodeAddress] = append(out.MinipoolDetailsByNode[minipool.NodeAddress], minipoolPtr) + + // Finally, populate the ValidatorDetails map + pubkey := minipool.Pubkey + withdrawableEpoch := FarFutureEpoch + exitEpoch := FarFutureEpoch + if minipool.WithdrawableEpoch != 0 { + withdrawableEpoch = minipool.WithdrawableEpoch + exitEpoch = minipool.WithdrawableEpoch - 1 + } + details := beacon.ValidatorStatus{ + Pubkey: minipool.Pubkey, + Index: minipool.ValidatorIndex, + WithdrawalCredentials: common.Hash{}, + Balance: (*big.Int)(_bondSizeThirtyTwoEth).Uint64(), + EffectiveBalance: (*big.Int)(_bondSizeThirtyTwoEth).Uint64(), + Slashed: false, + ActivationEligibilityEpoch: 0, + ActivationEpoch: 0, + ExitEpoch: exitEpoch, + WithdrawableEpoch: withdrawableEpoch, + Exists: true, + } + if minipool.Status == types.Staking { + details.Status = beacon.ValidatorState_ActiveOngoing + } + if minipool.Finalised { + details.Status = beacon.ValidatorState_WithdrawalDone + } + out.ValidatorDetails[pubkey] = details + } + + // Calculate the AverageNodeFee and DistributorShares + ptr.CalculateAverageFeeAndDistributorShares(out.MinipoolDetailsByNode[ptr.NodeAddress]) + + // Check if the node is an odao member + if node.IsOdao { + details := rpstate.OracleDaoMemberDetails{ + Address: node.Address, + Exists: true, + ID: node.Address.Hex(), + Url: "https://example.com", + JoinedTime: time.Unix(node.RegistrationTime.Unix(), 0), + LastProposalTime: time.Unix(node.RegistrationTime.Unix(), 0), + RPLBondAmount: node.RplStake, + } + out.OracleDaoMemberDetails = append(out.OracleDaoMemberDetails, details) + } + } + + return out +} + +// Boring derived data getters +func (h *MockHistory) GetConsensusStartBlock() uint64 { + return h.BeaconConfig.FirstSlotOfEpoch(h.StartEpoch) +} + +func (h *MockHistory) GetExecutionStartBlock() uint64 { + return h.GetConsensusStartBlock() + h.BlockOffset +} + +func (h *MockHistory) GetConsensusEndBlock() uint64 { + return h.BeaconConfig.LastSlotOfEpoch(h.EndEpoch) +} + +func (h *MockHistory) GetExecutionEndBlock() uint64 { + return h.GetConsensusEndBlock() + h.BlockOffset +} + +func (h *MockHistory) GetStartTime() time.Time { + return h.BeaconConfig.GetSlotTime(h.GetConsensusStartBlock()) +} + +func (h *MockHistory) GetEndTime() time.Time { + return h.BeaconConfig.GetSlotTime(h.GetConsensusEndBlock()) +} + +func (h *MockHistory) GetPreviousRewardSnapshotEvent() rprewards.RewardsEvent { + intervalEpochLength := h.EndEpoch - h.StartEpoch + 1 + consensusEndBlock := h.BeaconConfig.LastSlotOfEpoch(h.StartEpoch - 1) + consensusStartBlock := consensusEndBlock -
intervalEpochLength*h.BeaconConfig.SlotsPerEpoch + return rprewards.RewardsEvent{ + Index: big.NewInt(int64(h.NetworkDetails.RewardIndex - 1)), + ExecutionBlock: big.NewInt(int64(consensusEndBlock + h.BlockOffset)), + ConsensusBlock: big.NewInt(int64(consensusEndBlock)), + MerkleRoot: common.Hash{}, + MerkleTreeCID: "", + IntervalsPassed: big.NewInt(1), + TreasuryRPL: big.NewInt(0), + TrustedNodeRPL: []*big.Int{}, + NodeRPL: []*big.Int{}, + NodeETH: []*big.Int{}, + UserETH: big.NewInt(0), + IntervalStartTime: h.BeaconConfig.GetSlotTime(consensusStartBlock), + IntervalEndTime: h.BeaconConfig.GetSlotTime(consensusEndBlock), + SubmissionTime: h.BeaconConfig.GetSlotTime(consensusEndBlock), + } +} + +func (h *MockHistory) GetNodeSummary() map[string][]*MockNode { + out := make(map[string][]*MockNode) + for _, node := range h.Nodes { + out[node.Class] = append(out[node.Class], node) + } + return out +} diff --git a/shared/services/rewards/test/rocketpool.go b/shared/services/rewards/test/rocketpool.go new file mode 100644 index 000000000..5d123424d --- /dev/null +++ b/shared/services/rewards/test/rocketpool.go @@ -0,0 +1,89 @@ +package test + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/rocketpool-go/rewards" + "github.com/rocket-pool/rocketpool-go/rocketpool" +) + +// MockRocketPool is an EC mock specifically for testing treegen. +// At a high level our approach is to provide two options to the tester: +// 1) Use a recording of request/response data from production to emulate a canonical tree +// 2) Allow for full response customization. +// +// The former is useful for ensuring that no regressions arise during refactors that should +// otherwise be nonfunctional, i.e., not impact the merkle root. +// +// The latter is useful to probe specific behaviors such as opt-in/opt-out eligibility timing, +// node weight, smoothing pool status, etc. +// +// Because recording responses ties the test to a specific version of the contracts and therefore +// the client-side code, the interface we need to mock should be as minimal as possible, and the +// recorded data should tightly match that interface. That is, no recorded response should encode +// something like the contract address data are being requested from, but instead the high-level +// function name and arguments.
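As a hypothetical illustration of option (2), a test might configure the mock with the setters defined just below (a sketch only; `event` and `header` are placeholder values, not part of the patch):

    // Mock an EC at rewards interval 5 with one canned snapshot event and
    // one canned EL header.
    mock := NewMockRocketPool(t, 5)
    mock.SetRewardSnapshotEvent(event)              // stored under event.Index
    mock.SetHeaderByNumber(big.NewInt(100), header) // returned by HeaderByNumber(ctx, 100)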
+type MockRocketPool struct { + RewardsIndex *big.Int + t *testing.T + rewardSnapshotEvents map[uint64]rewards.RewardsEvent + headers map[uint64]*types.Header +} + +func NewMockRocketPool(t *testing.T, index uint64) *MockRocketPool { + return &MockRocketPool{t: t, RewardsIndex: big.NewInt(int64(index))} +} + +func (mock *MockRocketPool) GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) { + return true, nil +} + +func (mock *MockRocketPool) HeaderByNumber(_ context.Context, number *big.Int) (*types.Header, error) { + if header, ok := mock.headers[number.Uint64()]; ok { + return header, nil + } + return nil, fmt.Errorf("header not found in mock for %d, please set it with SetHeaderByNumber", number.Uint64()) +} + +func (mock *MockRocketPool) SetHeaderByNumber(number *big.Int, header *types.Header) { + if mock.headers == nil { + mock.headers = make(map[uint64]*types.Header) + } + mock.headers[number.Uint64()] = header +} + +func (mock *MockRocketPool) GetRewardsEvent(index uint64, _ []common.Address, opts *bind.CallOpts) (bool, rewards.RewardsEvent, error) { + + if event, ok := mock.rewardSnapshotEvents[index]; ok { + return true, event, nil + } + return false, rewards.RewardsEvent{}, nil +} + +func (mock *MockRocketPool) GetRewardSnapshotEvent(previousRewardsPoolAddresses []common.Address, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) { + if event, ok := mock.rewardSnapshotEvents[interval]; ok { + return event, nil + } + return rewards.RewardsEvent{}, nil +} + +func (mock *MockRocketPool) SetRewardSnapshotEvent(event rewards.RewardsEvent) { + if mock.rewardSnapshotEvents == nil { + mock.rewardSnapshotEvents = make(map[uint64]rewards.RewardsEvent) + } + mock.rewardSnapshotEvents[event.Index.Uint64()] = event +} + +func (mock *MockRocketPool) GetRewardIndex(opts *bind.CallOpts) (*big.Int, error) { + return mock.RewardsIndex, nil +} + +func (mock *MockRocketPool) Client() *rocketpool.RocketPool { + panic("not implemented") +} diff --git a/shared/services/rewards/types.go b/shared/services/rewards/types.go index 97a279854..a2895c4b4 100644 --- a/shared/services/rewards/types.go +++ b/shared/services/rewards/types.go @@ -1,34 +1,64 @@ package rewards import ( + "context" "fmt" "math/big" "strings" "time" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/rocket-pool/rocketpool-go/rewards" "github.com/rocket-pool/rocketpool-go/types" + "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/wealdtech/go-merkletree" ) -type rewardsFileVersion uint64 - const ( FarEpoch uint64 = 18446744073709551615 ) const ( - rewardsFileVersionUnknown = iota + rewardsFileVersionUnknown uint64 = iota rewardsFileVersionOne rewardsFileVersionTwo rewardsFileVersionThree rewardsFileVersionMax = iota - 1 + + minRewardsFileVersionSSZ = rewardsFileVersionThree ) +// RewardsExecutionClient defines an interface +// that contains only the functions from rocketpool.RocketPool +// required for rewards generation. +// This facade makes it easier to perform dependency injection in tests.
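+// A hypothetical compile-time assertion (assuming an import of the test package above does not create a cycle) would keep a mock in sync with this facade: +// +// var _ RewardsExecutionClient = (*test.MockRocketPool)(nil)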
+type RewardsExecutionClient interface { + GetNetworkEnabled(networkId *big.Int, opts *bind.CallOpts) (bool, error) + HeaderByNumber(context.Context, *big.Int) (*ethtypes.Header, error) + GetRewardsEvent(index uint64, rocketRewardsPoolAddresses []common.Address, opts *bind.CallOpts) (bool, rewards.RewardsEvent, error) + GetRewardSnapshotEvent(previousRewardsPoolAddresses []common.Address, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) + GetRewardIndex(opts *bind.CallOpts) (*big.Int, error) +} + +// RewardsBeaconClient defines an interface +// that contains only the functions from beacon.Client +// required for rewards generation. +// This facade makes it easier to perform dependency injection in tests. +type RewardsBeaconClient interface { + GetBeaconBlock(slot string) (beacon.BeaconBlock, bool, error) + GetCommitteesForEpoch(epoch *uint64) (beacon.Committees, error) + GetAttestations(slot string) ([]beacon.AttestationInfo, bool, error) + GetEth2Config() (beacon.Eth2Config, error) + GetBeaconHead() (beacon.BeaconHead, error) +} + // Interface for version-agnostic minipool performance type IMinipoolPerformanceFile interface { // Serialize a minipool performance file into bytes Serialize() ([]byte, error) + SerializeSSZ() ([]byte, error) // Serialize a minipool performance file into bytes designed for human readability SerializeHuman() ([]byte, error) @@ -48,28 +78,51 @@ type IRewardsFile interface { // Serialize a rewards file into bytes Serialize() ([]byte, error) + SerializeSSZ() ([]byte, error) // Deserialize a rewards file from bytes Deserialize([]byte) error - // Get the rewards file's header - GetHeader() *RewardsFileHeader + // Getters for general interval info + GetRewardsFileVersion() uint64 + GetIndex() uint64 + GetTotalNodeWeight() *big.Int + GetMerkleRoot() string + GetIntervalsPassed() uint64 + GetTotalProtocolDaoRpl() *big.Int + GetTotalOracleDaoRpl() *big.Int + GetTotalCollateralRpl() *big.Int + GetTotalNodeOperatorSmoothingPoolEth() *big.Int + GetTotalPoolStakerSmoothingPoolEth() *big.Int + GetExecutionStartBlock() uint64 + GetConsensusStartBlock() uint64 + GetExecutionEndBlock() uint64 + GetConsensusEndBlock() uint64 + GetStartTime() time.Time + GetEndTime() time.Time // Get all of the node addresses with rewards in this file // NOTE: the order of node addresses is not guaranteed to be stable, so don't rely on it GetNodeAddresses() []common.Address - // Get info about a node's rewards - GetNodeRewardsInfo(address common.Address) (INodeRewardsInfo, bool) + // Getters for info about a specific node's rewards + HasRewardsFor(common.Address) bool + GetNodeCollateralRpl(common.Address) *big.Int + GetNodeOracleDaoRpl(common.Address) *big.Int + GetNodeSmoothingPoolEth(common.Address) *big.Int + GetMerkleProof(common.Address) ([]common.Hash, error) - // Gets the minipool performance file corresponding to this rewards file - GetMinipoolPerformanceFile() IMinipoolPerformanceFile + // Getters for network info + HasRewardsForNetwork(network uint64) bool + GetNetworkCollateralRpl(network uint64) *big.Int + GetNetworkOracleDaoRpl(network uint64) *big.Int + GetNetworkSmoothingPoolEth(network uint64) *big.Int // Sets the CID of the minipool performance file corresponding to this rewards file SetMinipoolPerformanceFileCID(cid string) // Generate the Merkle Tree and its root from the rewards file's proofs - generateMerkleTree() error + GenerateMerkleTree() error } // Rewards per network @@ -97,26 +150,21 @@ type
ISmoothingPoolMinipoolPerformance interface { GetMissedAttestationCount() uint64 GetMissingAttestationSlots() []uint64 GetEthEarned() *big.Int -} - -// Interface for version-agnostic node operator rewards -type INodeRewardsInfo interface { - GetRewardNetwork() uint64 - GetCollateralRpl() *QuotedBigInt - GetOracleDaoRpl() *QuotedBigInt - GetSmoothingPoolEth() *QuotedBigInt - GetMerkleProof() ([]common.Hash, error) + GetBonusEthEarned() *big.Int + GetEffectiveCommission() *big.Int + GetConsensusIncome() *big.Int + GetAttestationScore() *big.Int } // Small struct to test version information for rewards files during deserialization type VersionHeader struct { - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion,omitempty"` + RewardsFileVersion uint64 `json:"rewardsFileVersion,omitempty"` } // General version-agnostic information about a rewards file type RewardsFileHeader struct { // Serialized fields - RewardsFileVersion rewardsFileVersion `json:"rewardsFileVersion"` + RewardsFileVersion uint64 `json:"rewardsFileVersion"` RulesetVersion uint64 `json:"rulesetVersion,omitempty"` Index uint64 `json:"index"` Network string `json:"network"` @@ -133,8 +181,7 @@ type RewardsFileHeader struct { NetworkRewards map[uint64]*NetworkRewardsInfo `json:"networkRewards"` // Non-serialized fields - MerkleTree *merkletree.MerkleTree `json:"-"` - InvalidNetworkNodes map[common.Address]uint64 `json:"-"` + MerkleTree *merkletree.MerkleTree `json:"-"` } // Information about an interval @@ -153,7 +200,7 @@ type IntervalInfo struct { SmoothingPoolEthAmount *QuotedBigInt `json:"smoothingPoolEthAmount"` MerkleProof []common.Hash `json:"merkleProof"` - TotalNodeWeight *QuotedBigInt `json:"-"` + TotalNodeWeight *big.Int `json:"-"` } type MinipoolInfo struct { @@ -173,8 +220,14 @@ type MinipoolInfo struct { AttestationScore *QuotedBigInt `json:"attestationScore"` CompletedAttestations map[uint64]bool `json:"-"` AttestationCount int `json:"attestationCount"` + TotalFee *big.Int `json:"-"` + MinipoolBonus *big.Int `json:"-"` + NodeOperatorBond *big.Int `json:"-"` + ConsensusIncome *QuotedBigInt `json:"consensusIncome"` } +var sixteenEth = big.NewInt(0).Mul(oneEth, big.NewInt(16)) + type IntervalDutiesInfo struct { Index uint64 Slots map[uint64]*SlotInfo @@ -206,12 +259,26 @@ type NodeSmoothingDetails struct { // v2 Fields OptInTime time.Time OptOutTime time.Time + + // v10 Fields + BonusEth *big.Int + EligibleBorrowedEth *big.Int + RplStake *big.Int } type QuotedBigInt struct { big.Int } +func QuotedBigIntFromBigInt(x *big.Int) *QuotedBigInt { + if x == nil { + return nil + } + q := QuotedBigInt{} + q.Int = *big.NewInt(0).Set(x) + return &q +} + func NewQuotedBigInt(x int64) *QuotedBigInt { q := QuotedBigInt{} native := big.NewInt(x) @@ -279,7 +346,7 @@ func (versionHeader *VersionHeader) deserializeMinipoolPerformanceFile(bytes []b file := &MinipoolPerformanceFile_v2{} return file, file.Deserialize(bytes) case rewardsFileVersionThree: - file := &MinipoolPerformanceFile_v3{} + file := &MinipoolPerformanceFile_v2{} return file, file.Deserialize(bytes) } diff --git a/shared/services/rewards/utils.go b/shared/services/rewards/utils.go index 326ebd7e4..c8c27a752 100644 --- a/shared/services/rewards/utils.go +++ b/shared/services/rewards/utils.go @@ -1,10 +1,8 @@ package rewards import ( - "context" "fmt" "io" - "math" "math/big" "net/http" "os" @@ -14,14 +12,12 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" 
"github.com/ethereum/go-ethereum/crypto" "github.com/goccy/go-json" "github.com/klauspost/compress/zstd" "github.com/mitchellh/go-homedir" "github.com/rocket-pool/rocketpool-go/rewards" "github.com/rocket-pool/rocketpool-go/rocketpool" - "github.com/rocket-pool/rocketpool-go/storage" rpstate "github.com/rocket-pool/rocketpool-go/utils/state" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" @@ -34,7 +30,7 @@ var zero *big.Int // Gets the intervals the node can claim and the intervals that have already been claimed func GetClaimStatus(rp *rocketpool.RocketPool, nodeAddress common.Address) (unclaimed []uint64, claimed []uint64, err error) { // Get the current interval - currentIndexBig, err := rewards.GetRewardIndex(rp, nil) + currentIndexBig, err := rp.GetRewardIndex(nil) if err != nil { return } @@ -88,8 +84,11 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.Index = interval var event rewards.RewardsEvent + previousRewardsPoolAddresses := cfg.Smartnode.GetPreviousRewardsPoolAddresses() + // Get the event details for this interval - event, err = GetRewardSnapshotEvent(rp, cfg, interval, opts) + client := NewRewardsExecutionClient(rp) + event, err = client.GetRewardSnapshotEvent(previousRewardsPoolAddresses, interval, opts) if err != nil { return } @@ -101,7 +100,7 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.MerkleRoot = merkleRootCanon // Check if the tree file exists - info.TreeFilePath = cfg.Smartnode.GetRewardsTreePath(interval, true) + info.TreeFilePath = cfg.Smartnode.GetRewardsTreePath(interval, true, config.RewardsExtensionJSON) _, err = os.Stat(info.TreeFilePath) if os.IsNotExist(err) { info.TreeFileExists = false @@ -119,10 +118,10 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no proofWrapper := localRewardsFile.Impl() - info.TotalNodeWeight = proofWrapper.GetHeader().TotalRewards.TotalNodeWeight + info.TotalNodeWeight = proofWrapper.GetTotalNodeWeight() // Make sure the Merkle root has the expected value - merkleRootFromFile := common.HexToHash(proofWrapper.GetHeader().MerkleRoot) + merkleRootFromFile := common.HexToHash(proofWrapper.GetMerkleRoot()) if merkleRootCanon != merkleRootFromFile { info.MerkleRootValid = false return @@ -130,117 +129,25 @@ func GetIntervalInfo(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, no info.MerkleRootValid = true // Get the rewards from it - rewards, exists := proofWrapper.GetNodeRewardsInfo(nodeAddress) - info.NodeExists = exists - if exists { - info.CollateralRplAmount = rewards.GetCollateralRpl() - info.ODaoRplAmount = rewards.GetOracleDaoRpl() - info.SmoothingPoolEthAmount = rewards.GetSmoothingPoolEth() - - var proof []common.Hash - proof, err = rewards.GetMerkleProof() - if err != nil { - err = fmt.Errorf("error deserializing merkle proof for %s, node %s: %w", info.TreeFilePath, nodeAddress.Hex(), err) - return - } - info.MerkleProof = proof - } - - return -} - -// Get the event for a rewards snapshot -func GetRewardSnapshotEvent(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, interval uint64, opts *bind.CallOpts) (rewards.RewardsEvent, error) { - - addresses := cfg.Smartnode.GetPreviousRewardsPoolAddresses() - found, event, err := rewards.GetRewardsEvent(rp, interval, addresses, opts) - if err != nil { - return rewards.RewardsEvent{}, fmt.Errorf("error getting rewards event for interval %d: %w", interval, err) - } - if !found { - return 
rewards.RewardsEvent{}, fmt.Errorf("interval %d event not found", interval) - } - - return event, nil - -} - -// Get the number of the latest EL block that was created before the given timestamp -func GetELBlockHeaderForTime(targetTime time.Time, rp *rocketpool.RocketPool) (*types.Header, error) { - - // Get the latest block's timestamp - latestBlockHeader, err := rp.Client.HeaderByNumber(context.Background(), nil) - if err != nil { - return nil, fmt.Errorf("error getting latest block header: %w", err) + info.NodeExists = proofWrapper.HasRewardsFor(nodeAddress) + if !info.NodeExists { + return } - latestBlock := latestBlockHeader.Number + info.CollateralRplAmount = &QuotedBigInt{*proofWrapper.GetNodeCollateralRpl(nodeAddress)} + info.ODaoRplAmount = &QuotedBigInt{*proofWrapper.GetNodeOracleDaoRpl(nodeAddress)} + info.SmoothingPoolEthAmount = &QuotedBigInt{*proofWrapper.GetNodeSmoothingPoolEth(nodeAddress)} - // Get the block that Rocket Pool deployed to the chain on, use that as the search start - deployBlock, err := storage.GetDeployBlock(rp) - if err != nil { - return nil, fmt.Errorf("error getting Rocket Pool deployment block: %w", err) + proof, err := proofWrapper.GetMerkleProof(nodeAddress) + if proof == nil { + err = fmt.Errorf("error deserializing merkle proof for %s, node %s: no proof for this node found", info.TreeFilePath, nodeAddress.Hex()) + return } - - // Get half the distance between the protocol deployment and right now - delta := big.NewInt(0).Sub(latestBlock, deployBlock) - delta.Div(delta, big.NewInt(2)) - - // Start at the halfway point - candidateBlockNumber := big.NewInt(0).Sub(latestBlock, delta) - candidateBlock, err := rp.Client.HeaderByNumber(context.Background(), candidateBlockNumber) if err != nil { - return nil, fmt.Errorf("error getting EL block %d: %w", candidateBlock, err) + err = fmt.Errorf("error deserializing merkle proof for %s, node %s: %w", info.TreeFilePath, nodeAddress.Hex(), err) } - bestBlock := candidateBlock - pivotSize := candidateBlock.Number.Uint64() - minimumDistance := +math.Inf(1) - targetTimeUnix := float64(targetTime.Unix()) - - for { - // Get the distance from the candidate block to the target time - candidateTime := float64(candidateBlock.Time) - delta := targetTimeUnix - candidateTime - distance := math.Abs(delta) - - // If it's better, replace the best candidate with it - if distance < minimumDistance { - minimumDistance = distance - bestBlock = candidateBlock - } else if pivotSize == 1 { - // If the pivot is down to size 1 and we didn't find anything better after another iteration, this is the best block! 
- for candidateTime > targetTimeUnix { - // Get the previous block if this one happened after the target time - candidateBlockNumber.Sub(candidateBlockNumber, big.NewInt(1)) - candidateBlock, err = rp.Client.HeaderByNumber(context.Background(), candidateBlockNumber) - if err != nil { - return nil, fmt.Errorf("error getting EL block %d: %w", candidateBlock, err) - } - candidateTime = float64(candidateBlock.Time) - bestBlock = candidateBlock - } - return bestBlock, nil - } + info.MerkleProof = proof - // Iterate over the correct half, setting the pivot to the halfway point of that half (rounded up) - pivotSize = uint64(math.Ceil(float64(pivotSize) / 2)) - if delta < 0 { - // Go left - candidateBlockNumber.Sub(candidateBlockNumber, big.NewInt(int64(pivotSize))) - } else { - // Go right - candidateBlockNumber.Add(candidateBlockNumber, big.NewInt(int64(pivotSize))) - } - - // Clamp the new candidate to the latest block - if candidateBlockNumber.Uint64() > (latestBlock.Uint64() - 1) { - candidateBlockNumber.SetUint64(latestBlock.Uint64() - 1) - } - - candidateBlock, err = rp.Client.HeaderByNumber(context.Background(), candidateBlockNumber) - if err != nil { - return nil, fmt.Errorf("error getting EL block %d: %w", candidateBlock, err) - } - } + return } // Downloads the rewards file for this interval @@ -249,7 +156,7 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo expectedCid := i.CID expectedRoot := i.MerkleRoot // Determine file name and path - rewardsTreePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(interval, isDaemon)) + rewardsTreePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(interval, isDaemon, config.RewardsExtensionJSON)) if err != nil { return fmt.Errorf("error expanding rewards tree path: %w", err) } @@ -316,16 +223,13 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo } // Get the original merkle root - downloadedRoot := deserializedRewardsFile.GetHeader().MerkleRoot - - // Clear the merkle root so we have a safer comparison after calculating it again - deserializedRewardsFile.GetHeader().MerkleRoot = "" + downloadedRoot := deserializedRewardsFile.GetMerkleRoot() // Reconstruct the merkle tree from the file data, this should overwrite the stored Merkle Root with a new one - deserializedRewardsFile.generateMerkleTree() + deserializedRewardsFile.GenerateMerkleTree() // Get the resulting merkle root - calculatedRoot := deserializedRewardsFile.GetHeader().MerkleRoot + calculatedRoot := deserializedRewardsFile.GetMerkleRoot() // Compare the merkle roots to see if the original is correct if !strings.EqualFold(downloadedRoot, calculatedRoot) { @@ -342,7 +246,7 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo deserializedRewardsFile, rewardsTreePath, ) - err = localRewardsFile.Write() + _, err = localRewardsFile.Write() if err != nil { return fmt.Errorf("error saving interval %d file to %s: %w", interval, rewardsTreePath, err) } @@ -359,7 +263,7 @@ func (i *IntervalInfo) DownloadRewardsFile(cfg *config.RocketPoolConfig, isDaemo } // Gets the start slot for the given interval -func GetStartSlotForInterval(previousIntervalEvent rewards.RewardsEvent, bc beacon.Client, beaconConfig beacon.Eth2Config) (uint64, error) { +func GetStartSlotForInterval(previousIntervalEvent rewards.RewardsEvent, bc RewardsBeaconClient, beaconConfig beacon.Eth2Config) (uint64, error) { // Get the chain head head, err := bc.GetBeaconHead() if err != nil { diff --git 
a/shared/services/rocketpool/client.go b/shared/services/rocketpool/client.go index c4ad7de97..c03525922 100644 --- a/shared/services/rocketpool/client.go +++ b/shared/services/rocketpool/client.go @@ -1203,12 +1203,11 @@ func (c *Client) deployTemplates(cfg *config.RocketPoolConfig, rocketpoolDir str } // Create the rewards file dir - rewardsFilePath, err := homedir.Expand(cfg.Smartnode.GetRewardsTreePath(0, false)) + rewardsFileDir, err := homedir.Expand(cfg.Smartnode.GetRewardsTreeDirectory(false)) if err != nil { fmt.Printf("%sWARNING: Couldn't expand the rewards tree file directory (%s). You will not be able to view or claim your rewards until you create the folder manually.%s\n", colorYellow, err.Error(), colorReset) return deployedContainers, nil } - rewardsFileDir := filepath.Dir(rewardsFilePath) err = os.MkdirAll(rewardsFileDir, 0775) if err != nil { fmt.Printf("%sWARNING: Couldn't create the rewards tree file directory (%s). You will not be able to view or claim your rewards until you create the folder [%s] manually.%s\n", colorYellow, err.Error(), rewardsFileDir, colorReset) diff --git a/shared/services/state/cli/.gitignore b/shared/services/state/cli/.gitignore new file mode 100644 index 000000000..d369ba9e0 --- /dev/null +++ b/shared/services/state/cli/.gitignore @@ -0,0 +1,2 @@ +cli +*.json diff --git a/shared/services/state/cli/cli.go b/shared/services/state/cli/cli.go new file mode 100644 index 000000000..1099a31df --- /dev/null +++ b/shared/services/state/cli/cli.go @@ -0,0 +1,121 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/rocket-pool/rocketpool-go/rocketpool" + "github.com/rocket-pool/smartnode/shared/services/beacon/client" + "github.com/rocket-pool/smartnode/shared/services/config" + "github.com/rocket-pool/smartnode/shared/services/state" + cfgtypes "github.com/rocket-pool/smartnode/shared/types/config" +) + +// A basic CLI tool which can be used to serialize NetworkState objects to files +// for future use. +// Accepts arguments for a beacon node URL, an execution node URL, and a slot number +// to get the state for. + +var bnFlag = flag.String("b", "http://localhost:5052", "The beacon node URL") +var elFlag = flag.String("e", "http://localhost:8545", "The execution node URL") +var slotFlag = flag.Uint64("slot", 0, "The slot number to get the state for") +var networkFlag = flag.String("network", "mainnet", "The network to get the state for, i.e. 'mainnet' or 'holesky'") +var prettyFlag = flag.Bool("p", false, "Pretty print the output") +var inputFlag = flag.Bool("i", false, "Parse a network state from stdin instead of retrieving it from the network") +var criticalDutiesSlotsFlag = flag.Bool("critical-duties-slots", false, "If passed, output a list of critical duties slots for the given state as if it were the final state in a 6300 epoch interval. 
This is output instead of the state JSON.") +var criticalDutiesEpochCountFlag = flag.Uint64("critical-duties-epoch-count", 6300, "The number of epochs to consider when calculating critical duties") + +func main() { + flag.Parse() + + sn := config.NewSmartnodeConfig(nil) + switch *networkFlag { + case "mainnet": + sn.Network.Value = cfgtypes.Network_Mainnet + case "holesky": + sn.Network.Value = cfgtypes.Network_Holesky + default: + fmt.Fprintf(os.Stderr, "Invalid network: %s\n", *networkFlag) + fmt.Fprintf(os.Stderr, "Valid networks are: mainnet, holesky\n") + os.Exit(1) + } + + ec, err := ethclient.Dial(*elFlag) + if err != nil { + fmt.Fprintf(os.Stderr, "Error connecting to execution node: %v\n", err) + os.Exit(1) + } + + contracts := sn.GetStateManagerContracts() + fmt.Fprintf(os.Stderr, "Contracts: %+v\n", contracts) + + rocketStorage := sn.GetStorageAddress() + + rp, err := rocketpool.NewRocketPool(ec, common.HexToAddress(rocketStorage)) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating rocketpool: %v\n", err) + os.Exit(1) + } + bc := client.NewStandardHttpClient(*bnFlag) + sm := state.NewNetworkStateManager(rp, contracts, bc, nil) + + var networkState *state.NetworkState + + if *inputFlag { + decoder := json.NewDecoder(os.Stdin) + err := decoder.Decode(&networkState) + if err != nil { + fmt.Fprintf(os.Stderr, "Error decoding network state: %v\n", err) + os.Exit(1) + } + } else if *slotFlag == 0 { + fmt.Fprintf(os.Stderr, "Slot number not provided, defaulting to head slot.\n") + networkState, err = sm.GetHeadState() + } else { + networkState, err = sm.GetStateForSlot(*slotFlag) + } + if err != nil { + fmt.Fprintf(os.Stderr, "Error getting network state: %v\n", err) + os.Exit(1) + } + + if *criticalDutiesSlotsFlag { + criticalDutiesEpochs := state.NewCriticalDutiesEpochs(*criticalDutiesEpochCountFlag, networkState) + fmt.Fprintf(os.Stderr, "Critical duties epochs to check: %d\n", len(criticalDutiesEpochs.CriticalDuties)) + + criticalDutiesSlots, err := state.NewCriticalDutiesSlots(criticalDutiesEpochs, bc) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating critical duties slots: %v\n", err) + os.Exit(1) + } + + // Serialize the critical duties slots to stdout + encoder := json.NewEncoder(os.Stdout) + if *prettyFlag { + encoder.SetIndent("", " ") + } + err = encoder.Encode(criticalDutiesSlots) + if err != nil { + fmt.Fprintf(os.Stderr, "Error encoding critical duties slots: %v\n", err) + os.Exit(1) + } + + os.Exit(0) + } + + fmt.Fprintf(os.Stderr, "Network state fetched, outputting to stdout\n") + encoder := json.NewEncoder(os.Stdout) + if *prettyFlag { + encoder.SetIndent("", " ") + } + err = encoder.Encode(networkState) + if err != nil { + fmt.Fprintf(os.Stderr, "Error encoding network state: %v\n", err) + os.Exit(1) + } +} diff --git a/shared/services/state/critical-duties-slots.go b/shared/services/state/critical-duties-slots.go new file mode 100644 index 000000000..8a81661eb --- /dev/null +++ b/shared/services/state/critical-duties-slots.go @@ -0,0 +1,103 @@ +package state + +import ( + "github.com/rocket-pool/smartnode/shared/services/beacon" +) + +type CriticalDutiesEpochs struct { + // Map of epoch uint64 to a list of validator indices + CriticalDuties map[uint64][]string +} + +type CriticalDutiesSlots struct { + // Map of validator index to a list of critical duties slots + CriticalDuties map[string][]uint64 +} + +// Gets the critical duties slots for a given state as if it were the final state in an interval spanning the given number of epochs +func
NewCriticalDutiesEpochs(epochs uint64, state *NetworkState) *CriticalDutiesEpochs { + criticalDuties := &CriticalDutiesEpochs{ + CriticalDuties: make(map[uint64][]string), + } + + endSlot := state.BeaconSlotNumber + endEpoch := state.BeaconConfig.SlotToEpoch(endSlot) + // Coerce endSlot to the last slot of the epoch + endSlot = state.BeaconConfig.LastSlotOfEpoch(endEpoch) + // Get the start epoch. Since the end epoch is the last inclusive epoch, we need to subtract 1 from the start epoch + startEpoch := endEpoch - epochs - 1 + + // Check for bond reductions first + for _, minipool := range state.MinipoolDetails { + lastReductionSlot := state.BeaconConfig.FirstSlotAtLeast(minipool.LastBondReductionTime.Int64()) + lastReductionEpoch := state.BeaconConfig.SlotToEpoch(lastReductionSlot) + if lastReductionEpoch < startEpoch { + continue + } + + if lastReductionEpoch > endEpoch { + continue + } + + pubkey := minipool.Pubkey + validatorIndex := state.ValidatorDetails[pubkey].Index + criticalDuties.CriticalDuties[lastReductionEpoch] = append(criticalDuties.CriticalDuties[lastReductionEpoch], validatorIndex) + } + + // Check for smoothing pool opt status changes next + for _, node := range state.NodeDetails { + lastOptStatusChange := state.BeaconConfig.FirstSlotAtLeast(node.SmoothingPoolRegistrationChanged.Int64()) + lastOptStatusChangeEpoch := state.BeaconConfig.SlotToEpoch(lastOptStatusChange) + if lastOptStatusChangeEpoch < startEpoch { + continue + } + + if lastOptStatusChangeEpoch > endEpoch { + continue + } + + // Flag every minipool for this node as having a critical duty + for _, minipool := range state.MinipoolDetailsByNode[node.NodeAddress] { + pubkey := minipool.Pubkey + validatorIndex := state.ValidatorDetails[pubkey].Index + criticalDuties.CriticalDuties[lastOptStatusChangeEpoch] = append(criticalDuties.CriticalDuties[lastOptStatusChangeEpoch], validatorIndex) + } + } + + return criticalDuties +} + +// For each validator in criticalDutiesEpochs, map the epochs to the slot the attestation duty assignment was for +func NewCriticalDutiesSlots(criticalDutiesEpochs *CriticalDutiesEpochs, bc beacon.Client) (*CriticalDutiesSlots, error) { + criticalDuties := &CriticalDutiesSlots{ + CriticalDuties: make(map[string][]uint64), + } + + for epoch, validatorIndices := range criticalDutiesEpochs.CriticalDuties { + // Create a set of validator indices to query when iterating committees + validatorIndicesSet := make(map[string]interface{}) + for _, validatorIndex := range validatorIndices { + validatorIndicesSet[validatorIndex] = struct{}{} + } + + // Get the beacon committee assignments for this epoch + // Rebind e to avoid using a pointer to the accumulator. 
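+ // (Before Go 1.22, a range variable is a single variable reused across + // iterations, so taking &epoch directly would hand every GetCommitteesForEpoch + // call the same address; copying to a local first keeps each query's epoch stable.)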
+ e := epoch + committees, err := bc.GetCommitteesForEpoch(&e) + if err != nil { + return nil, err + } + + // Iterate over the committees and check if the validator indices are in the set + for i := 0; i < committees.Count(); i++ { + validators := committees.Validators(i) + for _, validator := range validators { + if _, ok := validatorIndicesSet[validator]; ok { + criticalDuties.CriticalDuties[validator] = append(criticalDuties.CriticalDuties[validator], committees.Slot(i)) + } + } + } + } + + return criticalDuties, nil +} diff --git a/shared/services/state/manager.go b/shared/services/state/manager.go index c3c8d523c..25c856fb9 100644 --- a/shared/services/state/manager.go +++ b/shared/services/state/manager.go @@ -10,46 +10,51 @@ import ( "github.com/rocket-pool/rocketpool-go/rocketpool" "github.com/rocket-pool/smartnode/shared/services/beacon" "github.com/rocket-pool/smartnode/shared/services/config" - cfgtypes "github.com/rocket-pool/smartnode/shared/types/config" "github.com/rocket-pool/smartnode/shared/utils/log" ) type NetworkStateManager struct { - cfg *config.RocketPoolConfig - rp *rocketpool.RocketPool - ec rocketpool.ExecutionClient - bc beacon.Client - log *log.ColorLogger - Config *config.RocketPoolConfig - Network cfgtypes.Network - ChainID uint - BeaconConfig beacon.Eth2Config + rp *rocketpool.RocketPool + bc beacon.Client + log *log.ColorLogger + + // Memoized Beacon config + beaconConfig *beacon.Eth2Config + + // Multicaller and batch balance contract addresses + contracts config.StateManagerContracts } // Create a new manager for the network state -func NewNetworkStateManager(rp *rocketpool.RocketPool, cfg *config.RocketPoolConfig, ec rocketpool.ExecutionClient, bc beacon.Client, log *log.ColorLogger) (*NetworkStateManager, error) { +func NewNetworkStateManager( + rp *rocketpool.RocketPool, + contracts config.StateManagerContracts, + bc beacon.Client, + log *log.ColorLogger, +) *NetworkStateManager { // Create the manager - m := &NetworkStateManager{ - cfg: cfg, - rp: rp, - ec: ec, - bc: bc, - log: log, - Config: cfg, - Network: cfg.Smartnode.Network.Value.(cfgtypes.Network), - ChainID: cfg.Smartnode.GetChainID(), + return &NetworkStateManager{ + rp: rp, + bc: bc, + log: log, + contracts: contracts, + } +} + +func (m *NetworkStateManager) getBeaconConfig() (*beacon.Eth2Config, error) { + if m.beaconConfig != nil { + return m.beaconConfig, nil } // Get the Beacon config info - var err error - m.BeaconConfig, err = m.bc.GetEth2Config() + beaconConfig, err := m.bc.GetEth2Config() if err != nil { return nil, err } + m.beaconConfig = &beaconConfig - return m, nil - + return m.beaconConfig, nil } // Get the state of the network using the latest Execution layer block @@ -86,27 +91,35 @@ func (m *NetworkStateManager) GetLatestBeaconBlock() (beacon.BeaconBlock, error) // Gets the latest valid finalized block func (m *NetworkStateManager) GetLatestFinalizedBeaconBlock() (beacon.BeaconBlock, error) { + beaconConfig, err := m.getBeaconConfig() + if err != nil { + return beacon.BeaconBlock{}, fmt.Errorf("error getting Beacon config: %w", err) + } head, err := m.bc.GetBeaconHead() if err != nil { return beacon.BeaconBlock{}, fmt.Errorf("error getting Beacon chain head: %w", err) } - targetSlot := head.FinalizedEpoch*m.BeaconConfig.SlotsPerEpoch + (m.BeaconConfig.SlotsPerEpoch - 1) + targetSlot := head.FinalizedEpoch*beaconConfig.SlotsPerEpoch + (beaconConfig.SlotsPerEpoch - 1) return m.GetLatestProposedBeaconBlock(targetSlot) } // Gets the Beacon slot for the latest execution layer 
 
 // Get the state of the network using the latest Execution layer block
@@ -86,27 +91,35 @@ func (m *NetworkStateManager) GetLatestBeaconBlock() (beacon.BeaconBlock, error)
 
 // Gets the latest valid finalized block
 func (m *NetworkStateManager) GetLatestFinalizedBeaconBlock() (beacon.BeaconBlock, error) {
+	beaconConfig, err := m.getBeaconConfig()
+	if err != nil {
+		return beacon.BeaconBlock{}, fmt.Errorf("error getting Beacon config: %w", err)
+	}
 	head, err := m.bc.GetBeaconHead()
 	if err != nil {
 		return beacon.BeaconBlock{}, fmt.Errorf("error getting Beacon chain head: %w", err)
 	}
-	targetSlot := head.FinalizedEpoch*m.BeaconConfig.SlotsPerEpoch + (m.BeaconConfig.SlotsPerEpoch - 1)
+	targetSlot := head.FinalizedEpoch*beaconConfig.SlotsPerEpoch + (beaconConfig.SlotsPerEpoch - 1)
 	return m.GetLatestProposedBeaconBlock(targetSlot)
 }
 
 // Gets the Beacon slot for the latest execution layer block
 func (m *NetworkStateManager) GetHeadSlot() (uint64, error) {
+	beaconConfig, err := m.getBeaconConfig()
+	if err != nil {
+		return 0, fmt.Errorf("error getting Beacon config: %w", err)
+	}
 	// Get the latest EL block
-	latestBlockHeader, err := m.ec.HeaderByNumber(context.Background(), nil)
+	latestBlockHeader, err := m.rp.Client.HeaderByNumber(context.Background(), nil)
 	if err != nil {
 		return 0, fmt.Errorf("error getting latest EL block: %w", err)
 	}
 
 	// Get the corresponding Beacon slot based on the timestamp
 	latestBlockTime := time.Unix(int64(latestBlockHeader.Time), 0)
-	genesisTime := time.Unix(int64(m.BeaconConfig.GenesisTime), 0)
+	genesisTime := time.Unix(int64(beaconConfig.GenesisTime), 0)
 	secondsSinceGenesis := uint64(latestBlockTime.Sub(genesisTime).Seconds())
-	targetSlot := secondsSinceGenesis / m.BeaconConfig.SecondsPerSlot
+	targetSlot := secondsSinceGenesis / beaconConfig.SecondsPerSlot
 	return targetSlot, nil
 }
 
@@ -131,7 +144,11 @@ func (m *NetworkStateManager) GetLatestProposedBeaconBlock(targetSlot uint64) (b
 // Get the state of the network at the provided Beacon slot
 func (m *NetworkStateManager) getState(slotNumber uint64) (*NetworkState, error) {
-	state, err := CreateNetworkState(m.cfg, m.rp, m.ec, m.bc, m.log, slotNumber, m.BeaconConfig)
+	beaconConfig, err := m.getBeaconConfig()
+	if err != nil {
+		return nil, fmt.Errorf("error getting Beacon config: %w", err)
+	}
+	state, err := createNetworkState(m.contracts, m.rp, m.bc, m.log, slotNumber, beaconConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -140,7 +157,11 @@ func (m *NetworkStateManager) getState(slotNumber uint64) (*NetworkState, error)
 // Get the state of the network for a specific node only at the provided Beacon slot
 func (m *NetworkStateManager) getStateForNode(nodeAddress common.Address, slotNumber uint64, calculateTotalEffectiveStake bool) (*NetworkState, *big.Int, error) {
-	state, totalEffectiveStake, err := CreateNetworkStateForNode(m.cfg, m.rp, m.ec, m.bc, m.log, slotNumber, m.BeaconConfig, nodeAddress, calculateTotalEffectiveStake)
+	beaconConfig, err := m.getBeaconConfig()
+	if err != nil {
+		return nil, nil, fmt.Errorf("error getting Beacon config: %w", err)
+	}
+	state, totalEffectiveStake, err := createNetworkStateForNode(m.contracts, m.rp, m.bc, m.log, slotNumber, beaconConfig, nodeAddress, calculateTotalEffectiveStake)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/shared/services/state/network-state.go b/shared/services/state/network-state.go
index fe77381e9..983bb66e4 100644
--- a/shared/services/state/network-state.go
+++ b/shared/services/state/network-state.go
@@ -1,6 +1,7 @@
 package state
 
 import (
+	"encoding/json"
 	"fmt"
 	"math/big"
 	"time"
@@ -31,44 +32,134 @@ var fifteenEth = big.NewInt(0).Mul(big.NewInt(15), oneEth)
 var _13_6137_Eth = big.NewInt(0).Mul(big.NewInt(136137), big.NewInt(1e14))
 var _13_Eth = big.NewInt(0).Mul(big.NewInt(13), oneEth)
 
+type ValidatorDetailsMap map[types.ValidatorPubkey]beacon.ValidatorStatus
+
+func (vdm ValidatorDetailsMap) MarshalJSON() ([]byte, error) {
+	// Marshal as a slice of ValidatorStatus
+	out := make([]beacon.ValidatorStatus, 0, len(vdm))
+	for _, v := range vdm {
+		out = append(out, v)
+	}
+	return json.Marshal(out)
+}
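
Because Go randomizes map iteration order, the slice MarshalJSON emits is in a different order from one run to the next. If byte-stable output ever matters (for hashing or diffing serialized states, say), the entries could be sorted before marshaling; a self-contained sketch of the idea with a toy key type, not the real ValidatorDetailsMap:

package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

type status struct {
	Pubkey  string `json:"pubkey"`
	Balance uint64 `json:"balance"`
}

// detailsMap mirrors the shape of ValidatorDetailsMap with a string key
type detailsMap map[string]status

// MarshalJSON emits entries sorted by pubkey for deterministic output
func (dm detailsMap) MarshalJSON() ([]byte, error) {
	out := make([]status, 0, len(dm))
	for _, v := range dm {
		out = append(out, v)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Pubkey < out[j].Pubkey })
	return json.Marshal(out)
}

func main() {
	dm := detailsMap{
		"0xbb": {Pubkey: "0xbb", Balance: 32e9},
		"0xaa": {Pubkey: "0xaa", Balance: 31e9},
	}
	b, _ := json.Marshal(dm)
	fmt.Println(string(b)) // always lists 0xaa before 0xbb
}
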
+
+func (vdm *ValidatorDetailsMap) UnmarshalJSON(data []byte) error {
+	// Unmarshal as a slice of ValidatorStatus
+	var inp []beacon.ValidatorStatus
+	err := json.Unmarshal(data, &inp)
+	if err != nil {
+		return err
+	}
+
+	*vdm = make(ValidatorDetailsMap, len(inp))
+
+	// Convert back to a map
+	for _, v := range inp {
+		// Return an error if the pubkey is already in the map
+		if _, exists := (*vdm)[v.Pubkey]; exists {
+			return fmt.Errorf("duplicate validator details for pubkey %s", v.Pubkey.Hex())
+		}
+		(*vdm)[v.Pubkey] = v
+	}
+	return nil
+}
+
 type NetworkState struct {
 	// Network version
 
 	// Block / slot for this state
-	ElBlockNumber    uint64
-	BeaconSlotNumber uint64
-	BeaconConfig     beacon.Eth2Config
+	ElBlockNumber    uint64            `json:"el_block_number"`
+	BeaconSlotNumber uint64            `json:"beacon_slot_number"`
+	BeaconConfig     beacon.Eth2Config `json:"beacon_config"`
 
 	// Network details
-	NetworkDetails *rpstate.NetworkDetails
+	NetworkDetails *rpstate.NetworkDetails `json:"network_details"`
 
 	// Node details
-	NodeDetails          []rpstate.NativeNodeDetails
-	NodeDetailsByAddress map[common.Address]*rpstate.NativeNodeDetails
+	NodeDetails []rpstate.NativeNodeDetails `json:"node_details"`
+	// NodeDetailsByAddress is an index over NodeDetails; it is ignored when marshaling to JSON
+	// and rebuilt when unmarshaling from JSON.
+	NodeDetailsByAddress map[common.Address]*rpstate.NativeNodeDetails `json:"-"`
 
 	// Minipool details
-	MinipoolDetails          []rpstate.NativeMinipoolDetails
-	MinipoolDetailsByAddress map[common.Address]*rpstate.NativeMinipoolDetails
-	MinipoolDetailsByNode    map[common.Address][]*rpstate.NativeMinipoolDetails
+	MinipoolDetails []rpstate.NativeMinipoolDetails `json:"minipool_details"`
+	// The next two fields are indexes over MinipoolDetails; they are ignored when marshaling to JSON
+	// and rebuilt when unmarshaling from JSON.
+	MinipoolDetailsByAddress map[common.Address]*rpstate.NativeMinipoolDetails   `json:"-"`
+	MinipoolDetailsByNode    map[common.Address][]*rpstate.NativeMinipoolDetails `json:"-"`
 
 	// Validator details
-	ValidatorDetails map[types.ValidatorPubkey]beacon.ValidatorStatus
+	ValidatorDetails ValidatorDetailsMap `json:"validator_details"`
 
 	// Oracle DAO details
-	OracleDaoMemberDetails []rpstate.OracleDaoMemberDetails
+	OracleDaoMemberDetails []rpstate.OracleDaoMemberDetails `json:"oracle_dao_member_details"`
 
 	// Protocol DAO proposals
-	ProtocolDaoProposalDetails []protocol.ProtocolDaoProposalDetails
+	ProtocolDaoProposalDetails []protocol.ProtocolDaoProposalDetails `json:"protocol_dao_proposal_details,omitempty"`
 
 	// Internal fields
 	log *log.ColorLogger
 }
 
+func (ns NetworkState) MarshalJSON() ([]byte, error) {
+	// Convert through an alias type so json.Marshal doesn't recurse back into this method
+	type Alias NetworkState
+	a := (*Alias)(&ns)
+	return json.Marshal(a)
+}
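
The Alias conversion above is the standard way to reuse the default struct encoding inside a custom marshaler: the alias type has the same fields but none of the methods, so json.Marshal on it won't re-enter MarshalJSON and recurse until the stack overflows. A self-contained illustration with a toy type:

package main

import (
	"encoding/json"
	"fmt"
)

type Point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func (p Point) MarshalJSON() ([]byte, error) {
	// Alias has Point's fields but not its methods, so this call doesn't recurse
	type Alias Point
	return json.Marshal((*Alias)(&p))
}

func main() {
	b, err := json.Marshal(Point{X: 1, Y: 2})
	fmt.Println(string(b), err) // {"x":1,"y":2} <nil>
}
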
+
+func (ns *NetworkState) UnmarshalJSON(data []byte) error {
+	type Alias NetworkState
+	var a Alias
+	err := json.Unmarshal(data, &a)
+	if err != nil {
+		return err
+	}
+	*ns = NetworkState(a)
+
+	// Rebuild the node details by address index
+	ns.NodeDetailsByAddress = make(map[common.Address]*rpstate.NativeNodeDetails)
+	for i, details := range ns.NodeDetails {
+		if _, ok := ns.NodeDetailsByAddress[details.NodeAddress]; ok {
+			return fmt.Errorf("duplicate node details for address %s", details.NodeAddress.Hex())
+		}
+		// N.B. &details is not the same as &ns.NodeDetails[i]:
+		// &details is the address of the loop's local copy,
+		// &ns.NodeDetails[i] is the address of the element in the slice's backing array
+		ns.NodeDetailsByAddress[details.NodeAddress] = &ns.NodeDetails[i]
+	}
+
+	// Rebuild the minipool details by address index
+	ns.MinipoolDetailsByAddress = make(map[common.Address]*rpstate.NativeMinipoolDetails)
+	for i, details := range ns.MinipoolDetails {
+		if _, ok := ns.MinipoolDetailsByAddress[details.MinipoolAddress]; ok {
+			return fmt.Errorf("duplicate minipool details for address %s", details.MinipoolAddress.Hex())
+		}
+		// See the note in the loop above for why this takes &ns.MinipoolDetails[i]
+		ns.MinipoolDetailsByAddress[details.MinipoolAddress] = &ns.MinipoolDetails[i]
+	}
+
+	// Rebuild the minipool details by node index; append handles the nil slice
+	// on a node's first minipool, so no existence check is needed
+	ns.MinipoolDetailsByNode = make(map[common.Address][]*rpstate.NativeMinipoolDetails)
+	for i, details := range ns.MinipoolDetails {
+		// See the note in the loops above for why this takes &ns.MinipoolDetails[i]
+		ns.MinipoolDetailsByNode[details.NodeAddress] = append(ns.MinipoolDetailsByNode[details.NodeAddress], &ns.MinipoolDetails[i])
+	}
+
+	return nil
+}
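
The N.B. above is worth internalizing: ranging by value copies each element into the loop variable, so a pointer to it never aliases the slice. A self-contained demonstration:

package main

import "fmt"

type item struct{ n int }

func main() {
	items := []item{{1}, {2}}

	var viaLoopVar, viaIndex []*item
	for i, it := range items {
		viaLoopVar = append(viaLoopVar, &it)   // address of the loop's copy
		viaIndex = append(viaIndex, &items[i]) // address of the element itself
	}

	fmt.Println(viaLoopVar[0] == &items[0]) // false: never aliases the slice
	fmt.Println(viaIndex[0] == &items[0])   // true: aliases the backing array
}

Indexing into the slice is what lets the rebuilt maps share storage with NodeDetails and MinipoolDetails instead of pointing at stale copies.
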
+
 // Creates a snapshot of the entire Rocket Pool network state, on both the Execution and Consensus layers
-func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, ec rocketpool.ExecutionClient, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig beacon.Eth2Config) (*NetworkState, error) {
-	// Get the relevant network contracts
-	multicallerAddress := common.HexToAddress(cfg.Smartnode.GetMulticallAddress())
-	balanceBatcherAddress := common.HexToAddress(cfg.Smartnode.GetBalanceBatcherAddress())
+func createNetworkState(batchContracts config.StateManagerContracts, rp *rocketpool.RocketPool, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig *beacon.Eth2Config) (*NetworkState, error) {
 
 	// Get the execution block for the given slot
 	beaconBlock, exists, err := bc.GetBeaconBlock(fmt.Sprintf("%d", slotNumber))
@@ -92,7 +183,7 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool,
 		MinipoolDetailsByNode: map[common.Address][]*rpstate.NativeMinipoolDetails{},
 		BeaconSlotNumber:      slotNumber,
 		ElBlockNumber:         elBlockNumber,
-		BeaconConfig:          beaconConfig,
+		BeaconConfig:          *beaconConfig,
 		log:                   log,
 	}
 
@@ -100,7 +191,7 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool,
 	start := time.Now()
 
 	// Network contracts and details
-	contracts, err := rpstate.NewNetworkContracts(rp, multicallerAddress, balanceBatcherAddress, opts)
+	contracts, err := rpstate.NewNetworkContracts(rp, batchContracts.Multicaller, batchContracts.BalanceBatcher, opts)
 	if err != nil {
 		return nil, fmt.Errorf("error getting network contracts: %w", err)
 	}
@@ -149,7 +240,7 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool,
 
 	// Calculate avg node fees and distributor shares
 	for _, details := range state.NodeDetails {
-		rpstate.CalculateAverageFeeAndDistributorShares(rp, contracts, details, state.MinipoolDetailsByNode[details.NodeAddress])
+		details.CalculateAverageFeeAndDistributorShares(state.MinipoolDetailsByNode[details.NodeAddress])
 	}
 
 	// Oracle DAO member details
@@ -193,16 +284,12 @@ func CreateNetworkState(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool,
 
 // Creates a snapshot of the Rocket Pool network, but only for a single node
 // Also gets the total effective RPL stake of the network for convenience since this is required by several node routines
-func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.RocketPool, ec rocketpool.ExecutionClient, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig beacon.Eth2Config, nodeAddress common.Address, calculateTotalEffectiveStake bool) (*NetworkState, *big.Int, error) {
+func createNetworkStateForNode(batchContracts config.StateManagerContracts, rp *rocketpool.RocketPool, bc beacon.Client, log *log.ColorLogger, slotNumber uint64, beaconConfig *beacon.Eth2Config, nodeAddress common.Address, calculateTotalEffectiveStake bool) (*NetworkState, *big.Int, error) {
 	steps := 5
 	if calculateTotalEffectiveStake {
 		steps++
 	}
 
-	// Get the relevant network contracts
-	multicallerAddress := common.HexToAddress(cfg.Smartnode.GetMulticallAddress())
-	balanceBatcherAddress := common.HexToAddress(cfg.Smartnode.GetBalanceBatcherAddress())
-
 	// Get the execution block for the given slot
 	beaconBlock, exists, err := bc.GetBeaconBlock(fmt.Sprintf("%d", slotNumber))
 	if err != nil {
@@ -225,7 +312,7 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock
 		MinipoolDetailsByNode: map[common.Address][]*rpstate.NativeMinipoolDetails{},
 		BeaconSlotNumber:      slotNumber,
 		ElBlockNumber:         elBlockNumber,
-		BeaconConfig:          beaconConfig,
+		BeaconConfig:          *beaconConfig,
 		log:                   log,
 	}
 
@@ -233,7 +320,7 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock
 	start := time.Now()
 
 	// Network contracts and details
-	contracts, err := rpstate.NewNetworkContracts(rp, multicallerAddress, balanceBatcherAddress, opts)
+	contracts, err := rpstate.NewNetworkContracts(rp, batchContracts.Multicaller, batchContracts.BalanceBatcher, opts)
 	if err != nil {
 		return nil, nil, fmt.Errorf("error getting network contracts: %w", err)
 	}
@@ -283,7 +370,7 @@ func CreateNetworkStateForNode(cfg *config.RocketPoolConfig, rp *rocketpool.Rock
 
 	// Calculate avg node fees and distributor shares
 	for _, details := range state.NodeDetails {
-		rpstate.CalculateAverageFeeAndDistributorShares(rp, contracts, details, state.MinipoolDetailsByNode[details.NodeAddress])
+		details.CalculateAverageFeeAndDistributorShares(state.MinipoolDetailsByNode[details.NodeAddress])
 	}
 
 	// Get the total network effective RPL stake
@@ -340,7 +427,8 @@
 	return state, totalEffectiveStake, nil
 }
 
-func (s *NetworkState) GetNodeWeight(eligibleBorrowedEth *big.Int, nodeStake *big.Int) *big.Int {
+func (s *NetworkState) GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth *big.Int, nodeStake *big.Int) (*big.Int, *big.Int) {
+
 	rplPrice := s.NetworkDetails.RplPrice
 
 	// stakedRplValueInEth := nodeStake * ratio / 1 Eth
@@ -348,11 +436,22 @@
 	stakedRplValueInEth.Mul(nodeStake, rplPrice)
 	stakedRplValueInEth.Quo(stakedRplValueInEth, oneEth)
 
+	// Avoid division by zero
+	if eligibleBorrowedEth.Sign() == 0 {
+		return stakedRplValueInEth, big.NewInt(0)
+	}
+
 	// percentOfBorrowedEth := stakedRplValueInEth * 100 Eth / eligibleBorrowedEth
 	percentOfBorrowedEth := big.NewInt(0)
 	percentOfBorrowedEth.Mul(stakedRplValueInEth, oneHundredEth)
 	percentOfBorrowedEth.Quo(percentOfBorrowedEth, eligibleBorrowedEth)
+	return stakedRplValueInEth, percentOfBorrowedEth
+}
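
All of this arithmetic stays in 18-decimal fixed point (wei scale). With hypothetical inputs of 2,400 RPL staked at a price of 0.005 ETH per RPL against 24 ETH of eligible borrowed ETH, the staked RPL is worth 12 ETH and the percent figure comes out to 50 (scaled by 1e18). A self-contained check of the same two steps:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	oneEth := big.NewInt(1e18)
	oneHundredEth := new(big.Int).Mul(big.NewInt(100), oneEth)

	// Hypothetical inputs: 2,400 RPL staked, price 0.005 ETH/RPL, 24 ETH borrowed
	nodeStake := new(big.Int).Mul(big.NewInt(2400), oneEth)
	rplPrice := big.NewInt(5e15)
	eligibleBorrowedEth := new(big.Int).Mul(big.NewInt(24), oneEth)

	// stakedRplValueInEth := nodeStake * ratio / 1 Eth
	stakedRplValueInEth := new(big.Int).Mul(nodeStake, rplPrice)
	stakedRplValueInEth.Quo(stakedRplValueInEth, oneEth)

	// percentOfBorrowedEth := stakedRplValueInEth * 100 Eth / eligibleBorrowedEth
	percentOfBorrowedEth := new(big.Int).Mul(stakedRplValueInEth, oneHundredEth)
	percentOfBorrowedEth.Quo(percentOfBorrowedEth, eligibleBorrowedEth)

	fmt.Println(stakedRplValueInEth)  // 12000000000000000000 (12 ETH)
	fmt.Println(percentOfBorrowedEth) // 50000000000000000000 (50%, 18-decimal fixed point)
}
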
+
+func (s *NetworkState) GetNodeWeight(eligibleBorrowedEth *big.Int, nodeStake *big.Int) *big.Int {
+	stakedRplValueInEth, percentOfBorrowedEth := s.GetStakedRplValueInEthAndPercentOfBorrowedEth(eligibleBorrowedEth, nodeStake)
+
 	// If at or under 15%, return 100 * stakedRplValueInEth
 	if percentOfBorrowedEth.Cmp(fifteenEth) <= 0 {
 		stakedRplValueInEth.Mul(stakedRplValueInEth, oneHundred)
diff --git a/shared/services/state/utils.go b/shared/services/state/utils.go
index b09b06c28..d7bb23dd0 100644
--- a/shared/services/state/utils.go
+++ b/shared/services/state/utils.go
@@ -7,31 +7,30 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/rocket-pool/rocketpool-go/rewards"
 	"github.com/rocket-pool/rocketpool-go/rocketpool"
-	"github.com/rocket-pool/smartnode/shared/services/config"
 )
 
 // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block
-func GetClaimIntervalTime(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (time.Duration, error) {
+func GetClaimIntervalTime(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (time.Duration, error) {
 	return rewards.GetClaimIntervalTime(rp, opts)
 }
 
 // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block
-func GetNodeOperatorRewardsPercent(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
+func GetNodeOperatorRewardsPercent(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
 	return rewards.GetNodeOperatorRewardsPercent(rp, opts)
 }
 
 // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block
-func GetTrustedNodeOperatorRewardsPercent(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
+func GetTrustedNodeOperatorRewardsPercent(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
 	return rewards.GetTrustedNodeOperatorRewardsPercent(rp, opts)
 }
 
 // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block
-func GetProtocolDaoRewardsPercent(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
+func GetProtocolDaoRewardsPercent(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
 	return rewards.GetProtocolDaoRewardsPercent(rp, opts)
 }
 
 // TODO: temp until rocketpool-go supports RocketStorage contract address lookups per block
-func GetPendingRPLRewards(cfg *config.RocketPoolConfig, index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
+func GetPendingRPLRewards(index uint64, rp *rocketpool.RocketPool, opts *bind.CallOpts) (*big.Int, error) {
	return rewards.GetPendingRPLRewards(rp, opts)
}
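
Per the TODOs, these wrappers drop the cfg argument but deliberately keep the unused index parameter, so call sites won't need to change again once per-block RocketStorage lookups land. A sketch of the post-change call shape (the surrounding function and variable names here are hypothetical):

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/rocket-pool/rocketpool-go/rocketpool"
	"github.com/rocket-pool/smartnode/shared/services/state"
)

// printIntervalTime is an illustrative caller: no cfg argument anymore,
// but the interval index is still passed for forward compatibility
func printIntervalTime(rp *rocketpool.RocketPool, index uint64, opts *bind.CallOpts) error {
	intervalTime, err := state.GetClaimIntervalTime(index, rp, opts)
	if err != nil {
		return fmt.Errorf("error getting claim interval time for interval %d: %w", index, err)
	}
	fmt.Println("claim interval:", intervalTime)
	return nil
}
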