diff --git a/chainntnfs/esploranotify/driver.go b/chainntnfs/esploranotify/driver.go new file mode 100644 index 00000000000..6aec3190120 --- /dev/null +++ b/chainntnfs/esploranotify/driver.go @@ -0,0 +1,60 @@ +package esploranotify + +import ( + "fmt" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/lightningnetwork/lnd/blockcache" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/esplora" +) + +// createNewNotifier creates a new instance of the EsploraNotifier from a +// config. +func createNewNotifier(args ...interface{}) (chainntnfs.ChainNotifier, error) { + if len(args) != 5 { + return nil, fmt.Errorf("incorrect number of arguments to "+ + "createNewNotifier, expected 5, got %d", len(args)) + } + + client, ok := args[0].(*esplora.Client) + if !ok { + return nil, fmt.Errorf("first argument must be an " + + "*esplora.Client") + } + + chainParams, ok := args[1].(*chaincfg.Params) + if !ok { + return nil, fmt.Errorf("second argument must be a " + + "*chaincfg.Params") + } + + spendHintCache, ok := args[2].(chainntnfs.SpendHintCache) + if !ok { + return nil, fmt.Errorf("third argument must be a " + + "chainntnfs.SpendHintCache") + } + + confirmHintCache, ok := args[3].(chainntnfs.ConfirmHintCache) + if !ok { + return nil, fmt.Errorf("fourth argument must be a " + + "chainntnfs.ConfirmHintCache") + } + + blockCache, ok := args[4].(*blockcache.BlockCache) + if !ok { + return nil, fmt.Errorf("fifth argument must be a " + + "*blockcache.BlockCache") + } + + return New(client, chainParams, spendHintCache, confirmHintCache, + blockCache), nil +} + +// init registers a driver for the EsploraNotifier. 
+func init() { + chainntnfs.RegisterNotifier(&chainntnfs.NotifierDriver{ + NotifierType: notifierType, + New: createNewNotifier, + }) +} diff --git a/chainntnfs/esploranotify/esplora.go b/chainntnfs/esploranotify/esplora.go new file mode 100644 index 00000000000..a87d632b970 --- /dev/null +++ b/chainntnfs/esploranotify/esplora.go @@ -0,0 +1,886 @@ +package esploranotify + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/blockcache" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/esplora" + "github.com/lightningnetwork/lnd/queue" +) + +const ( + // notifierType uniquely identifies this concrete implementation of the + // ChainNotifier interface. + notifierType = "esplora" +) + +var ( + // ErrEsploraNotifierShuttingDown is returned when the notifier is + // shutting down. + ErrEsploraNotifierShuttingDown = errors.New( + "esplora notifier is shutting down", + ) +) + +// EsploraNotifier implements the ChainNotifier interface using an Esplora +// HTTP API as the chain backend. This provides a lightweight way to receive +// chain notifications without running a full node. +type EsploraNotifier struct { + epochClientCounter uint64 // To be used atomically. + + start sync.Once + active int32 // To be used atomically. + stopped int32 // To be used atomically. + + bestBlockMtx sync.RWMutex + bestBlock chainntnfs.BlockEpoch + + // client is the Esplora client used to communicate with the API. + client *esplora.Client + + // subscriptionID is the ID of our block notification subscription. + subscriptionID uint64 + + // chainParams are the parameters of the chain we're connected to. 
+ chainParams *chaincfg.Params + + notificationCancels chan interface{} + notificationRegistry chan interface{} + + txNotifier *chainntnfs.TxNotifier + + blockEpochClients map[uint64]*blockEpochRegistration + + // spendHintCache is a cache used to query and update the latest height + // hints for an outpoint. + spendHintCache chainntnfs.SpendHintCache + + // confirmHintCache is a cache used to query the latest height hints for + // a transaction. + confirmHintCache chainntnfs.ConfirmHintCache + + // blockCache is an LRU block cache. + blockCache *blockcache.BlockCache + + wg sync.WaitGroup + quit chan struct{} +} + +// Ensure EsploraNotifier implements the ChainNotifier interface at compile +// time. +var _ chainntnfs.ChainNotifier = (*EsploraNotifier)(nil) + +// New creates a new instance of the EsploraNotifier. The Esplora client +// should already be started and connected before being passed to this +// function. +func New(client *esplora.Client, chainParams *chaincfg.Params, + spendHintCache chainntnfs.SpendHintCache, + confirmHintCache chainntnfs.ConfirmHintCache, + blockCache *blockcache.BlockCache) *EsploraNotifier { + + return &EsploraNotifier{ + client: client, + chainParams: chainParams, + + notificationCancels: make(chan interface{}), + notificationRegistry: make(chan interface{}), + + blockEpochClients: make(map[uint64]*blockEpochRegistration), + + spendHintCache: spendHintCache, + confirmHintCache: confirmHintCache, + + blockCache: blockCache, + + quit: make(chan struct{}), + } +} + +// Start establishes the connection to the Esplora API and begins +// processing block notifications. +func (e *EsploraNotifier) Start() error { + var startErr error + e.start.Do(func() { + startErr = e.startNotifier() + }) + return startErr +} + +// startNotifier is the internal method that performs the actual startup. +func (e *EsploraNotifier) startNotifier() error { + log.Info("Esplora notifier starting...") + + // Ensure the client is connected. 
+ if !e.client.IsConnected() { + return errors.New("esplora client is not connected") + } + + // Get the current best block from the Esplora API. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + tipHash, err := e.client.GetTipHash(ctx) + if err != nil { + return fmt.Errorf("failed to get tip hash: %w", err) + } + + tipHeight, err := e.client.GetTipHeight(ctx) + if err != nil { + return fmt.Errorf("failed to get tip height: %w", err) + } + + blockHeader, err := e.client.GetBlockHeader(ctx, tipHash) + if err != nil { + return fmt.Errorf("failed to get block header: %w", err) + } + + blockHash, err := chainhash.NewHashFromStr(tipHash) + if err != nil { + return fmt.Errorf("failed to parse block hash: %w", err) + } + + e.bestBlockMtx.Lock() + e.bestBlock = chainntnfs.BlockEpoch{ + Height: int32(tipHeight), + Hash: blockHash, + BlockHeader: blockHeader, + } + e.bestBlockMtx.Unlock() + + log.Infof("Esplora notifier started at height %d, hash %s", + tipHeight, tipHash) + + // Initialize the transaction notifier with the current best height. + e.txNotifier = chainntnfs.NewTxNotifier( + uint32(tipHeight), chainntnfs.ReorgSafetyLimit, + e.confirmHintCache, e.spendHintCache, + ) + + // Start the notification dispatcher goroutine. + e.wg.Add(1) + go e.notificationDispatcher() + + // Start the block polling handler. + e.wg.Add(1) + go e.blockPollingHandler() + + // Mark the notifier as active. + atomic.StoreInt32(&e.active, 1) + + log.Debug("Esplora notifier started successfully") + + return nil +} + +// Stop shuts down the EsploraNotifier. +func (e *EsploraNotifier) Stop() error { + // Already shutting down? + if atomic.AddInt32(&e.stopped, 1) != 1 { + return nil + } + + log.Info("Esplora notifier shutting down...") + defer log.Debug("Esplora notifier shutdown complete") + + close(e.quit) + e.wg.Wait() + + // Notify all pending clients of our shutdown by closing the related + // notification channels. 
+ for _, epochClient := range e.blockEpochClients { + close(epochClient.cancelChan) + epochClient.wg.Wait() + close(epochClient.epochChan) + } + + // Tear down the transaction notifier if it was initialized. + if e.txNotifier != nil { + e.txNotifier.TearDown() + } + + return nil +} + +// Started returns true if this instance has been started, and false otherwise. +func (e *EsploraNotifier) Started() bool { + return atomic.LoadInt32(&e.active) != 0 +} + +// blockPollingHandler polls for new blocks from the Esplora API. +func (e *EsploraNotifier) blockPollingHandler() { + defer e.wg.Done() + + // Subscribe to block notifications from the client. + blockNotifs, subID := e.client.Subscribe() + e.subscriptionID = subID + + defer e.client.Unsubscribe(subID) + + for { + select { + case blockInfo, ok := <-blockNotifs: + if !ok { + log.Warn("Block notification channel closed") + return + } + + if blockInfo == nil { + continue + } + + newHeight := int32(blockInfo.Height) + + // Fetch the block header. + ctx, cancel := context.WithTimeout( + context.Background(), 30*time.Second, + ) + blockHeader, err := e.client.GetBlockHeader(ctx, blockInfo.ID) + cancel() + if err != nil { + log.Errorf("Failed to get block header: %v", err) + continue + } + + blockHash, err := chainhash.NewHashFromStr(blockInfo.ID) + if err != nil { + log.Errorf("Failed to parse block hash: %v", err) + continue + } + + // Check if this is a new block or a reorg. + e.bestBlockMtx.RLock() + prevHeight := e.bestBlock.Height + prevHash := e.bestBlock.Hash + e.bestBlockMtx.RUnlock() + + // Handle the new block. + if newHeight > prevHeight { + // New block connected. + e.handleBlockConnected(newHeight, blockHash, blockHeader) + } else if newHeight <= prevHeight && !blockHash.IsEqual(prevHash) { + // Potential reorg detected. 
+				log.Warnf("Potential reorg detected: "+
+					"prev_height=%d, new_height=%d",
+					prevHeight, newHeight)
+
+				e.handleReorg(prevHeight, newHeight, blockHash, blockHeader)
+			}
+
+		case <-e.quit:
+			return
+		}
+	}
+}
+
+// handleBlockConnected processes a newly connected block. NOTE(review): when the backend advances by more than one block at once, only the new tip is dispatched — epochs for the intermediate heights are never sent to clients; confirm this is acceptable or backfill them here.
+func (e *EsploraNotifier) handleBlockConnected(height int32,
+	hash *chainhash.Hash, header *wire.BlockHeader) {
+
+	log.Debugf("New block connected: height=%d, hash=%s", height, hash)
+
+	// Update the best block under the mutex so concurrent readers see a
+	// consistent (height, hash, header) triple.
+	e.bestBlockMtx.Lock()
+	e.bestBlock = chainntnfs.BlockEpoch{
+		Height:      height,
+		Hash:        hash,
+		BlockHeader: header,
+	}
+	e.bestBlockMtx.Unlock()
+
+	// Notify all block epoch clients about the new block. NOTE(review): blockEpochClients is mutated by notificationDispatcher (register/cancel) with no lock, while this read runs on the polling goroutine — possible data race; confirm, or route block dispatch through notificationDispatcher as other lnd notifiers do.
+	for _, client := range e.blockEpochClients {
+		e.notifyBlockEpochClient(client, height, hash, header)
+	}
+
+	// Update the txNotifier's height. Relies on the patched NotifyHeight
+	// below that raises currentHeight, since this backend never calls
+	// ConnectTip.
+	if e.txNotifier != nil {
+		err := e.txNotifier.NotifyHeight(uint32(height))
+		if err != nil {
+			log.Errorf("Failed to notify height: %v", err)
+		}
+
+		// Check pending confirmations and spends in parallel.
+		var wg sync.WaitGroup
+		wg.Add(2)
+		go func() {
+			defer wg.Done()
+			e.checkPendingConfirmations(uint32(height))
+		}()
+		go func() {
+			defer wg.Done()
+			e.checkPendingSpends(uint32(height))
+		}()
+		wg.Wait()
+	}
+}
+
+// checkPendingConfirmations queries the Esplora API to check if any
+// pending confirmation requests have been satisfied.
+func (e *EsploraNotifier) checkPendingConfirmations(currentHeight uint32) { + unconfirmed := e.txNotifier.UnconfirmedRequests() + if len(unconfirmed) == 0 { + return + } + + log.Debugf("Checking %d pending confirmation requests at height %d", + len(unconfirmed), currentHeight) + + for _, confRequest := range unconfirmed { + confDetails, err := e.historicalConfDetails( + confRequest, 0, currentHeight, + ) + if err != nil { + log.Debugf("Error checking confirmation for %v: %v", + confRequest, err) + continue + } + + if confDetails == nil { + continue + } + + log.Infof("Found confirmation for pending request %v at "+ + "height %d", confRequest, confDetails.BlockHeight) + + err = e.txNotifier.UpdateConfDetails(confRequest, confDetails) + if err != nil { + log.Errorf("Failed to update conf details for %v: %v", + confRequest, err) + } + } +} + +// checkPendingSpends queries the Esplora API to check if any pending +// spend requests have been satisfied. +func (e *EsploraNotifier) checkPendingSpends(currentHeight uint32) { + unspent := e.txNotifier.UnspentRequests() + if len(unspent) == 0 { + return + } + + log.Debugf("Checking %d pending spend requests at height %d", + len(unspent), currentHeight) + + for _, spendRequest := range unspent { + spendDetails, err := e.historicalSpendDetails( + spendRequest, 0, currentHeight, + ) + if err != nil { + log.Debugf("Error checking spend for %v: %v", + spendRequest, err) + continue + } + + if spendDetails == nil { + continue + } + + log.Infof("Found spend for pending request %v at height %d", + spendRequest, spendDetails.SpendingHeight) + + err = e.txNotifier.UpdateSpendDetails(spendRequest, spendDetails) + if err != nil { + log.Errorf("Failed to update spend details for %v: %v", + spendRequest, err) + } + } +} + +// handleReorg handles a chain reorganization. 
+func (e *EsploraNotifier) handleReorg(prevHeight, newHeight int32,
+	newHash *chainhash.Hash, newHeader *wire.BlockHeader) {
+	// Disconnect stale blocks down to AND including newHeight: the caller
+	// only invokes us when the hash at newHeight differs from our view, so
+	// the block at newHeight itself was replaced and must be rolled back
+	// too. int32 avoids uint32 underflow when newHeight is 0.
+	if e.txNotifier != nil {
+		for h := prevHeight; h >= newHeight; h-- {
+			err := e.txNotifier.DisconnectTip(uint32(h))
+			if err != nil {
+				log.Errorf("Failed to disconnect tip at "+
+					"height %d: %v", h, err)
+			}
+		}
+	}
+
+	e.handleBlockConnected(newHeight, newHash, newHeader)
+}
+
+// notificationDispatcher is the primary goroutine which handles client
+// notification registrations, as well as notification dispatches.
+func (e *EsploraNotifier) notificationDispatcher() {
+	defer e.wg.Done()
+
+	for {
+		select {
+		case cancelMsg := <-e.notificationCancels:
+			switch msg := cancelMsg.(type) {
+			case *epochCancel:
+				log.Infof("Cancelling epoch notification, "+
+					"epoch_id=%v", msg.epochID)
+
+				reg := e.blockEpochClients[msg.epochID]
+				if reg != nil {
+					reg.epochQueue.Stop()
+					close(reg.cancelChan)
+					reg.wg.Wait()
+					close(reg.epochChan)
+					delete(e.blockEpochClients, msg.epochID)
+				}
+			}
+
+		case registerMsg := <-e.notificationRegistry:
+			switch msg := registerMsg.(type) {
+			case *blockEpochRegistration:
+				log.Infof("New block epoch subscription, "+
+					"epoch_id=%v", msg.epochID)
+
+				e.blockEpochClients[msg.epochID] = msg
+
+				if msg.bestBlock != nil {
+					e.dispatchMissedBlocks(msg)
+				} else {
+					e.bestBlockMtx.RLock()
+					bestBlock := e.bestBlock
+					e.bestBlockMtx.RUnlock()
+
+					e.notifyBlockEpochClient(
+						msg, bestBlock.Height,
+						bestBlock.Hash,
+						bestBlock.BlockHeader,
+					)
+				}
+
+				msg.errorChan <- nil
+			}
+
+		case <-e.quit:
+			return
+		}
+	}
+}
+
+// handleHistoricalConfDispatch handles a request to look up historical
+// confirmation details for a transaction.
+func (e *EsploraNotifier) handleHistoricalConfDispatch( + dispatch *chainntnfs.HistoricalConfDispatch) { + + defer e.wg.Done() + + confDetails, err := e.historicalConfDetails( + dispatch.ConfRequest, dispatch.StartHeight, dispatch.EndHeight, + ) + if err != nil { + log.Errorf("Failed to get historical conf details for %v: %v", + dispatch.ConfRequest, err) + return + } + + err = e.txNotifier.UpdateConfDetails(dispatch.ConfRequest, confDetails) + if err != nil { + log.Errorf("Failed to update conf details for %v: %v", + dispatch.ConfRequest, err) + } +} + +// handleHistoricalSpendDispatch handles a request to look up historical +// spend details for an outpoint. +func (e *EsploraNotifier) handleHistoricalSpendDispatch( + dispatch *chainntnfs.HistoricalSpendDispatch) { + + defer e.wg.Done() + + spendDetails, err := e.historicalSpendDetails( + dispatch.SpendRequest, dispatch.StartHeight, dispatch.EndHeight, + ) + if err != nil { + log.Errorf("Failed to get historical spend details for %v: %v", + dispatch.SpendRequest, err) + return + } + + err = e.txNotifier.UpdateSpendDetails(dispatch.SpendRequest, spendDetails) + if err != nil { + log.Errorf("Failed to update spend details for %v: %v", + dispatch.SpendRequest, err) + } +} + +// historicalConfDetails looks up the confirmation details for a transaction +// within the given height range. +func (e *EsploraNotifier) historicalConfDetails( + confRequest chainntnfs.ConfRequest, + startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, error) { + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // If we have a txid, try to get the transaction directly. 
+ if confRequest.TxID != chainntnfs.ZeroHash { + txInfo, err := e.client.GetTransaction(ctx, confRequest.TxID.String()) + if err == nil && txInfo != nil && txInfo.Status.Confirmed { + blockHash, err := chainhash.NewHashFromStr(txInfo.Status.BlockHash) + if err != nil { + return nil, fmt.Errorf("invalid block hash: %w", err) + } + + // Fetch the actual transaction. + msgTx, err := e.client.GetRawTransactionMsgTx(ctx, txInfo.TxID) + if err != nil { + return nil, fmt.Errorf("failed to fetch raw tx %s: %w", + txInfo.TxID, err) + } + + // Get the TxIndex. + txIndex, err := e.client.GetTxIndex( + ctx, txInfo.Status.BlockHash, txInfo.TxID, + ) + if err != nil { + return nil, fmt.Errorf("failed to get tx index for %s: %w", + txInfo.TxID, err) + } + + return &chainntnfs.TxConfirmation{ + BlockHash: blockHash, + BlockHeight: uint32(txInfo.Status.BlockHeight), + TxIndex: txIndex, + Tx: msgTx, + }, nil + } + + if err != nil { + log.Debugf("GetTransaction for %v failed: %v", + confRequest.TxID, err) + } + } + + // If we don't have a pkScript, we can't do scripthash lookup. + if confRequest.PkScript.Script() == nil || + len(confRequest.PkScript.Script()) == 0 { + return nil, nil + } + + // Search by scripthash. 
+ scripthash := esplora.ScripthashFromScript(confRequest.PkScript.Script()) + + txs, err := e.client.GetScripthashTxs(ctx, scripthash) + if err != nil { + return nil, fmt.Errorf("failed to get scripthash txs: %w", err) + } + + targetTxID := confRequest.TxID.String() + for _, txInfo := range txs { + if !txInfo.Status.Confirmed { + continue + } + + if confRequest.TxID != chainntnfs.ZeroHash { + if txInfo.TxID != targetTxID { + continue + } + } else if uint32(txInfo.Status.BlockHeight) < startHeight || + uint32(txInfo.Status.BlockHeight) > endHeight { + continue + } + + blockHash, err := chainhash.NewHashFromStr(txInfo.Status.BlockHash) + if err != nil { + continue + } + + log.Debugf("Found confirmed tx %s at height %d via scripthash", + txInfo.TxID, txInfo.Status.BlockHeight) + + msgTx, err := e.client.GetRawTransactionMsgTx(ctx, txInfo.TxID) + if err != nil { + return nil, fmt.Errorf("failed to fetch raw tx %s: %w", + txInfo.TxID, err) + } + + txIndex, err := e.client.GetTxIndex( + ctx, txInfo.Status.BlockHash, txInfo.TxID, + ) + if err != nil { + return nil, fmt.Errorf("failed to get tx index for %s: %w", + txInfo.TxID, err) + } + + return &chainntnfs.TxConfirmation{ + BlockHash: blockHash, + BlockHeight: uint32(txInfo.Status.BlockHeight), + TxIndex: txIndex, + Tx: msgTx, + }, nil + } + + return nil, nil +} + +// historicalSpendDetails looks up the spend details for an outpoint within +// the given height range. +func (e *EsploraNotifier) historicalSpendDetails( + spendRequest chainntnfs.SpendRequest, + startHeight, endHeight uint32) (*chainntnfs.SpendDetail, error) { + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // First, check if the output is spent using the outspend endpoint. 
+ outSpend, err := e.client.GetTxOutSpend( + ctx, spendRequest.OutPoint.Hash.String(), + spendRequest.OutPoint.Index, + ) + if err != nil { + return nil, fmt.Errorf("failed to check outspend: %w", err) + } + + if !outSpend.Spent { + return nil, nil + } + + // The output is spent, get the spending transaction. + if !outSpend.Status.Confirmed { + // Spent but not confirmed yet. + return nil, nil + } + + if uint32(outSpend.Status.BlockHeight) < startHeight || + uint32(outSpend.Status.BlockHeight) > endHeight { + return nil, nil + } + + // Fetch the spending transaction. + spenderHash, err := chainhash.NewHashFromStr(outSpend.TxID) + if err != nil { + return nil, fmt.Errorf("invalid spender txid: %w", err) + } + + spendingTx, err := e.client.GetRawTransactionMsgTx(ctx, outSpend.TxID) + if err != nil { + return nil, fmt.Errorf("failed to get spending tx: %w", err) + } + + return &chainntnfs.SpendDetail{ + SpentOutPoint: &spendRequest.OutPoint, + SpenderTxHash: spenderHash, + SpendingTx: spendingTx, + SpenderInputIndex: outSpend.Vin, + SpendingHeight: int32(outSpend.Status.BlockHeight), + }, nil +} + +// dispatchMissedBlocks sends block epoch notifications for any blocks that +// the client may have missed. 
+func (e *EsploraNotifier) dispatchMissedBlocks( + registration *blockEpochRegistration) { + + e.bestBlockMtx.RLock() + currentHeight := e.bestBlock.Height + e.bestBlockMtx.RUnlock() + + startHeight := registration.bestBlock.Height + 1 + + for height := startHeight; height <= currentHeight; height++ { + ctx, cancel := context.WithTimeout( + context.Background(), 30*time.Second, + ) + + hashStr, err := e.client.GetBlockHashByHeight(ctx, int64(height)) + cancel() + if err != nil { + log.Errorf("Failed to get block hash at height %d: %v", + height, err) + continue + } + + ctx, cancel = context.WithTimeout( + context.Background(), 30*time.Second, + ) + header, err := e.client.GetBlockHeader(ctx, hashStr) + cancel() + if err != nil { + log.Errorf("Failed to get block header at height %d: %v", + height, err) + continue + } + + blockHash, err := chainhash.NewHashFromStr(hashStr) + if err != nil { + continue + } + + e.notifyBlockEpochClient(registration, height, blockHash, header) + } +} + +// notifyBlockEpochClient sends a block epoch notification to a specific client. +func (e *EsploraNotifier) notifyBlockEpochClient( + registration *blockEpochRegistration, height int32, + hash *chainhash.Hash, header *wire.BlockHeader) { + + epoch := &chainntnfs.BlockEpoch{ + Height: height, + Hash: hash, + BlockHeader: header, + } + + select { + case registration.epochQueue.ChanIn() <- epoch: + case <-registration.cancelChan: + case <-e.quit: + } +} + +// RegisterConfirmationsNtfn registers an intent to be notified once the +// target txid/output script has reached numConfs confirmations on-chain. 
+func (e *EsploraNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, + pkScript []byte, numConfs, heightHint uint32, + opts ...chainntnfs.NotifierOption) (*chainntnfs.ConfirmationEvent, error) { + + ntfn, err := e.txNotifier.RegisterConf( + txid, pkScript, numConfs, heightHint, opts..., + ) + if err != nil { + return nil, err + } + + if ntfn.HistoricalDispatch != nil { + e.wg.Add(1) + go e.handleHistoricalConfDispatch(ntfn.HistoricalDispatch) + } + + return ntfn.Event, nil +} + +// RegisterSpendNtfn registers an intent to be notified once the target +// outpoint/output script has been spent by a transaction on-chain. +func (e *EsploraNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, + pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) { + + ntfn, err := e.txNotifier.RegisterSpend(outpoint, pkScript, heightHint) + if err != nil { + return nil, err + } + + if ntfn.HistoricalDispatch != nil { + e.wg.Add(1) + go e.handleHistoricalSpendDispatch(ntfn.HistoricalDispatch) + } + + return ntfn.Event, nil +} + +// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the +// caller to receive notifications of each new block connected to the main +// chain. 
+func (e *EsploraNotifier) RegisterBlockEpochNtfn( + bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) { + + reg := &blockEpochRegistration{ + epochQueue: queue.NewConcurrentQueue(20), + epochChan: make(chan *chainntnfs.BlockEpoch, 20), + cancelChan: make(chan struct{}), + epochID: atomic.AddUint64(&e.epochClientCounter, 1), + bestBlock: bestBlock, + errorChan: make(chan error, 1), + } + reg.epochQueue.Start() + + reg.wg.Add(1) + go func() { + defer reg.wg.Done() + + for { + select { + case item := <-reg.epochQueue.ChanOut(): + epoch := item.(*chainntnfs.BlockEpoch) + select { + case reg.epochChan <- epoch: + case <-reg.cancelChan: + return + case <-e.quit: + return + } + + case <-reg.cancelChan: + return + + case <-e.quit: + return + } + } + }() + + select { + case e.notificationRegistry <- reg: + return &chainntnfs.BlockEpochEvent{ + Epochs: reg.epochChan, + Cancel: func() { + cancel := &epochCancel{ + epochID: reg.epochID, + } + + select { + case e.notificationCancels <- cancel: + case <-e.quit: + } + }, + }, <-reg.errorChan + + case <-e.quit: + reg.epochQueue.Stop() + return nil, ErrEsploraNotifierShuttingDown + } +} + +// GetBlock attempts to retrieve a block from the Esplora API. +func (e *EsploraNotifier) GetBlock(hash chainhash.Hash) (*btcutil.Block, + error) { + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + return e.client.GetBlock(ctx, &hash) +} + +// blockEpochRegistration represents a client's registration for block epoch +// notifications. +type blockEpochRegistration struct { + epochID uint64 + epochChan chan *chainntnfs.BlockEpoch + epochQueue *queue.ConcurrentQueue + cancelChan chan struct{} + bestBlock *chainntnfs.BlockEpoch + errorChan chan error + wg sync.WaitGroup +} + +// epochCancel is a message sent to cancel a block epoch registration. +type epochCancel struct { + epochID uint64 +} + +// parseBlockHeader parses a hex-encoded block header into a wire.BlockHeader. 
+func parseBlockHeader(hexHeader string) (*wire.BlockHeader, error) { + headerBytes, err := hex.DecodeString(hexHeader) + if err != nil { + return nil, fmt.Errorf("failed to decode header hex: %w", err) + } + + var header wire.BlockHeader + err = header.Deserialize(bytes.NewReader(headerBytes)) + if err != nil { + return nil, fmt.Errorf("failed to deserialize header: %w", err) + } + + return &header, nil +} diff --git a/chainntnfs/esploranotify/log.go b/chainntnfs/esploranotify/log.go new file mode 100644 index 00000000000..f1babbf772e --- /dev/null +++ b/chainntnfs/esploranotify/log.go @@ -0,0 +1,23 @@ +package esploranotify + +import "github.com/btcsuite/btclog/v2" + +// Subsystem defines the logging code for this subsystem. +const Subsystem = "ESPN" + +// log is a logger that is initialized with no output filters. This means the +// package will not perform any logging by default until the caller requests +// it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. This +// should be used in preference to SetLogWriter if the caller is also using +// btclog. +func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index af85c298086..ded5b1840ef 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -1742,6 +1742,13 @@ func (n *TxNotifier) NotifyHeight(height uint32) error { n.Lock() defer n.Unlock() + // Update the current height if the provided height is greater. This is + // important for backends like Esplora that don't call ConnectTip but + // still need the txNotifier to track the current chain height. + if height > n.currentHeight { + n.currentHeight = height + } + // First, we'll dispatch an update to all of the notification clients // for our watched requests with the number of confirmations left at // this new height. 
@@ -2000,6 +2007,17 @@ func (n *TxNotifier) unconfirmedRequests() []ConfRequest {
 	return unconfirmed
 }
 
+// UnconfirmedRequests returns a snapshot, taken under the notifier's lock,
+// of the confirmation requests that the TxNotifier still considers
+// unconfirmed. Polling backends such as Esplora use this to periodically
+// re-check whether pending confirmation requests have since been satisfied.
+func (n *TxNotifier) UnconfirmedRequests() []ConfRequest {
+	n.Lock()
+	defer n.Unlock()
+
+	return n.unconfirmedRequests()
+}
+
 // unspentRequests returns the set of spend requests that are still seen as
 // unspent by the TxNotifier.
 //
@@ -2021,6 +2039,16 @@ func (n *TxNotifier) unspentRequests() []SpendRequest {
 	return unspent
 }
 
+// UnspentRequests returns a snapshot, taken under the notifier's lock, of
+// the spend requests that the TxNotifier still considers unspent. Polling
+// backends such as Esplora use this to re-check pending spend requests.
+func (n *TxNotifier) UnspentRequests() []SpendRequest {
+	n.Lock()
+	defer n.Unlock()
+
+	return n.unspentRequests()
+}
+
 // dispatchConfReorg dispatches a reorg notification to the client if the
 // confirmation notification was already delivered.
 //
diff --git a/chainreg/chainregistry.go b/chainreg/chainregistry.go
index 71e9c04a6c4..18850926d19 100644
--- a/chainreg/chainregistry.go
+++ b/chainreg/chainregistry.go
@@ -21,8 +21,10 @@ import (
 	"github.com/lightningnetwork/lnd/chainntnfs"
 	"github.com/lightningnetwork/lnd/chainntnfs/bitcoindnotify"
 	"github.com/lightningnetwork/lnd/chainntnfs/btcdnotify"
+	"github.com/lightningnetwork/lnd/chainntnfs/esploranotify"
 	"github.com/lightningnetwork/lnd/chainntnfs/neutrinonotify"
 	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/esplora"
 	"github.com/lightningnetwork/lnd/fn/v2"
 	"github.com/lightningnetwork/lnd/graph/db/models"
 	"github.com/lightningnetwork/lnd/input"
@@ -56,6 +58,9 @@ type Config struct {
 
 	// BtcdMode defines settings for connecting to a btcd node.
BtcdMode *lncfg.Btcd + // EsploraMode defines settings for connecting to an Esplora HTTP API. + EsploraMode *lncfg.Esplora + // HeightHintDB is a pointer to the database that stores the height // hints. HeightHintDB kvdb.Backend @@ -678,6 +683,80 @@ func NewPartialChainControl(cfg *Config) (*PartialChainControl, func(), error) { } } + case "esplora": + esploraMode := cfg.EsploraMode + + log.Infof("Initializing Esplora backend, url=%s", esploraMode.URL) + + // Create the Esplora client configuration. + esploraClientCfg := &esplora.ClientConfig{ + URL: esploraMode.URL, + RequestTimeout: esploraMode.RequestTimeout, + MaxRetries: esploraMode.MaxRetries, + PollInterval: esploraMode.PollInterval, + } + + log.Debug("Creating Esplora client") + + // Create and start the Esplora client. + esploraClient := esplora.NewClient(esploraClientCfg) + + log.Debug("Starting Esplora client") + if err := esploraClient.Start(); err != nil { + return nil, nil, fmt.Errorf("unable to start esplora "+ + "client: %v", err) + } + log.Info("Esplora client started successfully") + + // Create the chain notifier. + log.Debug("Creating Esplora chain notifier") + chainNotifier := esploranotify.New( + esploraClient, cfg.ActiveNetParams.Params, + hintCache, hintCache, cfg.BlockCache, + ) + cc.ChainNotifier = chainNotifier + log.Debug("Esplora chain notifier created") + + // Create the filtered chain view. + log.Debug("Creating Esplora filtered chain view") + cc.ChainView, err = chainview.NewEsploraFilteredChainView( + esploraClient, + ) + if err != nil { + return nil, nil, fmt.Errorf("unable to create "+ + "esplora chain view: %v", err) + } + log.Debug("Esplora filtered chain view created") + + // Create the fee estimator. 
+ log.Debug("Creating Esplora fee estimator") + feeEstimatorCfg := esplora.DefaultFeeEstimatorConfig() + cc.FeeEstimator = esplora.NewFeeEstimator( + esploraClient, feeEstimatorCfg, + ) + log.Debug("Esplora fee estimator created") + + // Create the chain client for wallet integration. + log.Debug("Creating Esplora chain client") + chainClientCfg := &esplora.ChainClientConfig{ + UseGapLimit: esploraMode.UseGapLimit, + GapLimit: esploraMode.GapLimit, + AddressBatchSize: esploraMode.AddressBatchSize, + } + chainClient := esplora.NewChainClient( + esploraClient, cfg.ActiveNetParams.Params, chainClientCfg, + ) + cc.ChainSource = chainClient + log.Debug("Esplora chain client created") + + // Health check verifies we can connect to the Esplora API. + cc.HealthCheck = func() error { + if !esploraClient.IsConnected() { + return fmt.Errorf("esplora client not connected") + } + return nil + } + case "nochainbackend": backend := &NoChainBackend{} source := &NoChainSource{ diff --git a/config.go b/config.go index e9ce4103542..839549bd6cc 100644 --- a/config.go +++ b/config.go @@ -249,6 +249,7 @@ const ( bitcoindBackendName = "bitcoind" btcdBackendName = "btcd" neutrinoBackendName = "neutrino" + esploraBackendName = "esplora" defaultPrunedNodeMaxPeers = 4 defaultNeutrinoMaxPeers = 8 @@ -379,6 +380,7 @@ type Config struct { BtcdMode *lncfg.Btcd `group:"btcd" namespace:"btcd"` BitcoindMode *lncfg.Bitcoind `group:"bitcoind" namespace:"bitcoind"` NeutrinoMode *lncfg.Neutrino `group:"neutrino" namespace:"neutrino"` + EsploraMode *lncfg.Esplora `group:"esplora" namespace:"esplora"` BlockCacheSize uint64 `long:"blockcachesize" description:"The maximum capacity of the block cache"` @@ -621,6 +623,7 @@ func DefaultConfig() Config { UserAgentVersion: neutrino.UserAgentVersion, MaxPeers: defaultNeutrinoMaxPeers, }, + EsploraMode: lncfg.DefaultEsploraConfig(), BlockCacheSize: defaultBlockCacheSize, MaxPendingChannels: lncfg.DefaultMaxPendingChannels, NoSeedBackup: defaultNoSeedBackup, @@ 
-1343,13 +1346,21 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser, case neutrinoBackendName: // No need to get RPC parameters. + case esploraBackendName: + // Validate that an Esplora URL was provided. + if cfg.EsploraMode.URL == "" { + return nil, mkErr("esplora.url must be set when " + + "using esplora mode (e.g., " + + "http://localhost:3002 or https://blockstream.info/api)") + } + case "nochainbackend": // Nothing to configure, we're running without any chain // backend whatsoever (pure signing mode). default: - str := "only btcd, bitcoind, and neutrino mode " + - "supported for bitcoin at this time" + str := "only btcd, bitcoind, neutrino, and esplora " + + "mode supported for bitcoin at this time" return nil, mkErr(str) } diff --git a/config_builder.go b/config_builder.go index 7ce63041ee2..5e0357f0a29 100644 --- a/config_builder.go +++ b/config_builder.go @@ -622,6 +622,7 @@ func (d *DefaultWalletImpl) BuildWalletConfig(ctx context.Context, NeutrinoMode: d.cfg.NeutrinoMode, BitcoindMode: d.cfg.BitcoindMode, BtcdMode: d.cfg.BtcdMode, + EsploraMode: d.cfg.EsploraMode, HeightHintDB: dbs.HeightHintDB, ChanStateDB: dbs.ChanStateDB.ChannelStateDB(), NeutrinoCS: neutrinoCS, diff --git a/esplora/chainclient.go b/esplora/chainclient.go new file mode 100644 index 00000000000..dcfe89b475f --- /dev/null +++ b/esplora/chainclient.go @@ -0,0 +1,1735 @@ +package esplora + +import ( + "cmp" + "context" + "errors" + "fmt" + "slices" + "sync" + "sync/atomic" + "time" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/rpcclient" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcwallet/chain" + "github.com/btcsuite/btcwallet/waddrmgr" + "github.com/btcsuite/btcwallet/wtxmgr" +) + +const ( + // esploraBackendName is the name of the Esplora backend. 
+	esploraBackendName = "esplora"
+
+	// defaultRequestTimeout is the default timeout for Esplora requests.
+	defaultRequestTimeout = 30 * time.Second
+)
+
+var (
+	// ErrChainClientNotStarted is returned when operations are attempted
+	// before the chain client is started.
+	ErrChainClientNotStarted = errors.New("chain client not started")
+
+	// ErrOutputSpent is returned when the requested output has been spent.
+	ErrOutputSpent = errors.New("output has been spent")
+
+	// ErrOutputNotFound is returned when the requested output cannot be
+	// found.
+	ErrOutputNotFound = errors.New("output not found")
+)
+
+// ChainClientConfig holds configuration options for the ChainClient.
+type ChainClientConfig struct {
+	// UseGapLimit enables gap limit optimization for wallet recovery.
+	UseGapLimit bool
+
+	// GapLimit is the number of consecutive unused addresses before stopping.
+	GapLimit int
+
+	// AddressBatchSize is the number of addresses to query concurrently.
+	AddressBatchSize int
+}
+
+// DefaultChainClientConfig returns a ChainClientConfig with default values.
+func DefaultChainClientConfig() *ChainClientConfig {
+	return &ChainClientConfig{
+		UseGapLimit:      true,
+		GapLimit:         20,
+		AddressBatchSize: 10,
+	}
+}
+
+// ChainClient implements the btcwallet chain.Interface on top of an Esplora
+// HTTP backend, translating wallet chain queries and notifications into
+// REST calls on the underlying *Client.
+type ChainClient struct {
+	// started/stopped are flipped atomically so Start/Stop are idempotent.
+	started int32
+	stopped int32
+
+	client         *Client
+	chainParams    *chaincfg.Params
+	cfg            *ChainClientConfig
+	subscriptionID uint64
+
+	// bestBlock tracks the current chain tip.
+	bestBlockMtx sync.RWMutex
+	bestBlock    waddrmgr.BlockStamp
+
+	// lastProcessedHeight tracks the last block height we sent to the wallet.
+	// This is used to ensure we don't skip any blocks.
+	lastProcessedHeight int32
+
+	// headerCache caches block headers by hash.
+	headerCacheMtx sync.RWMutex
+	headerCache    map[chainhash.Hash]*wire.BlockHeader
+
+	// heightToHash maps block heights to hashes.
+	heightToHashMtx sync.RWMutex
+	heightToHash    map[int32]*chainhash.Hash
+
+	// notificationChan is used to send notifications to the wallet.
+	notificationChan chan interface{}
+
+	// notifyBlocks indicates if block notifications are enabled.
+	notifyBlocks atomic.Bool
+
+	// watchedAddrs tracks addresses being watched.
+	watchedAddrsMtx sync.RWMutex
+	watchedAddrs    map[string]btcutil.Address
+
+	// watchedOutpoints tracks outpoints being watched.
+	watchedOutpointsMtx sync.RWMutex
+	watchedOutpoints    map[wire.OutPoint]btcutil.Address
+
+	// progress logging for long rescans/sync.
+	progressMtx        sync.Mutex
+	lastProgressLog    time.Time
+	lastProgressHeight int64
+
+	quit chan struct{}
+	wg   sync.WaitGroup
+}
+
+// Compile time check to ensure ChainClient implements chain.Interface.
+var _ chain.Interface = (*ChainClient)(nil)
+
+// NewChainClient creates a new Esplora chain client. A nil cfg falls back to
+// DefaultChainClientConfig.
+func NewChainClient(client *Client, chainParams *chaincfg.Params,
+	cfg *ChainClientConfig) *ChainClient {
+
+	if cfg == nil {
+		cfg = DefaultChainClientConfig()
+	}
+
+	return &ChainClient{
+		client:           client,
+		chainParams:      chainParams,
+		cfg:              cfg,
+		headerCache:      make(map[chainhash.Hash]*wire.BlockHeader),
+		heightToHash:     make(map[int32]*chainhash.Hash),
+		notificationChan: make(chan interface{}, 100),
+		watchedAddrs:     make(map[string]btcutil.Address),
+		watchedOutpoints: make(map[wire.OutPoint]btcutil.Address),
+		quit:             make(chan struct{}),
+	}
+}
+
+// Start initializes the chain client and begins processing notifications.
+// It fetches the current tip from Esplora, seeds the best-block state and
+// header cache, launches the notification handler, and emits the initial
+// ClientConnected/RescanFinished notifications the wallet expects.
+//
+// NOTE(review): if Start returns an error below, started stays non-zero and
+// a retry becomes a no-op — confirm callers treat a Start failure as fatal.
+func (c *ChainClient) Start() error {
+	if atomic.AddInt32(&c.started, 1) != 1 {
+		return nil
+	}
+
+	log.Info("Starting Esplora chain client")
+
+	// Ensure the underlying client is connected.
+	if !c.client.IsConnected() {
+		return ErrNotConnected
+	}
+
+	// Get initial best block.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
+	defer cancel()
+
+	tipHeight, err := c.client.GetTipHeight(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get tip height: %w", err)
+	}
+
+	tipHash, err := c.client.GetTipHash(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get tip hash: %w", err)
+	}
+
+	header, err := c.client.GetBlockHeader(ctx, tipHash)
+	if err != nil {
+		return fmt.Errorf("failed to get tip header: %w", err)
+	}
+
+	hash, err := chainhash.NewHashFromStr(tipHash)
+	if err != nil {
+		return fmt.Errorf("failed to parse tip hash: %w", err)
+	}
+
+	c.bestBlockMtx.Lock()
+	c.bestBlock = waddrmgr.BlockStamp{
+		Height:    int32(tipHeight),
+		Hash:      *hash,
+		Timestamp: header.Timestamp,
+	}
+	// Initialize lastProcessedHeight to current tip - we'll start processing
+	// new blocks from here.
+	c.lastProcessedHeight = int32(tipHeight)
+	c.bestBlockMtx.Unlock()
+
+	// Cache the header.
+	c.cacheHeader(int32(tipHeight), hash, header)
+
+	// Start the notification handler.
+	c.wg.Add(1)
+	go c.notificationHandler()
+
+	// Send ClientConnected notification to trigger wallet sync.
+	log.Infof("Sending ClientConnected notification to trigger wallet sync")
+	c.notificationChan <- chain.ClientConnected{}
+
+	// Send initial rescan finished notification.
+	c.bestBlockMtx.RLock()
+	bestBlock := c.bestBlock
+	c.bestBlockMtx.RUnlock()
+
+	c.notificationChan <- &chain.RescanFinished{
+		Hash:   &bestBlock.Hash,
+		Height: bestBlock.Height,
+		Time:   bestBlock.Timestamp,
+	}
+
+	return nil
+}
+
+// Stop shuts down the chain client.
+//
+// NOTE(review): closing notificationChan assumes nothing sends on it after
+// wg.Wait() returns — confirm notificationHandler (and Start) are the only
+// senders.
+func (c *ChainClient) Stop() {
+	if atomic.AddInt32(&c.stopped, 1) != 1 {
+		return
+	}
+
+	log.Info("Stopping Esplora chain client")
+
+	close(c.quit)
+	c.wg.Wait()
+
+	close(c.notificationChan)
+}
+
+// WaitForShutdown blocks until the client has finished shutting down.
+func (c *ChainClient) WaitForShutdown() {
+	c.wg.Wait()
+}
+
+// GetBestBlock returns the hash and height of the best known block.
+func (c *ChainClient) GetBestBlock() (*chainhash.Hash, int32, error) {
+	c.bestBlockMtx.RLock()
+	defer c.bestBlockMtx.RUnlock()
+
+	// Copy the hash so callers cannot mutate our cached tip state.
+	hash := c.bestBlock.Hash
+	return &hash, c.bestBlock.Height, nil
+}
+
+// GetBlock returns the raw block from the server given its hash.
+func (c *ChainClient) GetBlock(hash *chainhash.Hash) (*wire.MsgBlock, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
+	defer cancel()
+
+	block, err := c.client.GetBlock(ctx, hash)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch block: %w", err)
+	}
+
+	return block.MsgBlock(), nil
+}
+
+// GetTxIndex returns the index of a transaction within a block at the given height.
+func (c *ChainClient) GetTxIndex(height int64, txid string) (uint32, string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
+	defer cancel()
+
+	return c.client.GetTxIndexByHeight(ctx, height, txid)
+}
+
+// GetBlockHash returns the hash of the block at the given height, consulting
+// the in-memory height-to-hash cache before falling back to the API.
+//
+// NOTE(review): heightToHash (and headerCache below) grow without bound over
+// the process lifetime — confirm this is acceptable for long-running nodes.
+func (c *ChainClient) GetBlockHash(height int64) (*chainhash.Hash, error) {
+	// Check cache first.
+	c.heightToHashMtx.RLock()
+	if hash, ok := c.heightToHash[int32(height)]; ok {
+		c.heightToHashMtx.RUnlock()
+		return hash, nil
+	}
+	c.heightToHashMtx.RUnlock()
+
+	// Retry logic to handle race condition where esplora hasn't indexed
+	// the block yet. This can happen when we receive a block notification
+	// but the intermediate blocks haven't been indexed.
+	const maxRetries = 5
+	const retryDelay = 500 * time.Millisecond
+
+	var hashStr string
+	var err error
+
+	for i := 0; i < maxRetries; i++ {
+		// Each attempt gets its own timeout context.
+		ctx, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
+		start := time.Now()
+		hashStr, err = c.client.GetBlockHashByHeight(ctx, height)
+		cancel()
+
+		if err == nil {
+			c.maybeLogProgress(height)
+			if dur := time.Since(start); dur > 2*time.Second {
+				log.Warnf("Slow GetBlockHash height=%d took %v", height, dur)
+			}
+			if i > 0 {
+				log.Debugf("Successfully got block hash at height %d after %d retries",
+					height, i)
+			}
+			break
+		}
+
+		if dur := time.Since(start); dur > 2*time.Second {
+			log.Warnf("Slow GetBlockHash height=%d failed after %v: %v",
+				height, dur, err)
+		}
+
+		log.Debugf("GetBlockHash attempt %d/%d failed for height %d: %v",
+			i+1, maxRetries, height, err)
+
+		// If this isn't the last retry, wait before trying again.
+		// NOTE(review): this sleep does not observe c.quit, so shutdown
+		// can stall for up to maxRetries*retryDelay — confirm acceptable.
+		if i < maxRetries-1 {
+			log.Debugf("Retrying GetBlockHash for height %d in %v",
+				height, retryDelay)
+			time.Sleep(retryDelay)
+		}
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to get block hash at height %d after %d retries: %w",
+			height, maxRetries, err)
+	}
+
+	hash, err := chainhash.NewHashFromStr(hashStr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse block hash: %w", err)
+	}
+
+	// Cache the result.
+	c.heightToHashMtx.Lock()
+	c.heightToHash[int32(height)] = hash
+	c.heightToHashMtx.Unlock()
+
+	return hash, nil
+}
+
+// GetBlockHeader returns the block header for the given hash.
+func (c *ChainClient) GetBlockHeader(hash *chainhash.Hash) (*wire.BlockHeader, error) {
+	// Check cache first.
+	c.headerCacheMtx.RLock()
+	if header, ok := c.headerCache[*hash]; ok {
+		c.headerCacheMtx.RUnlock()
+		return header, nil
+	}
+	c.headerCacheMtx.RUnlock()
+
+	// Retry logic to handle race condition where esplora hasn't indexed
+	// the block yet.
+ const maxRetries = 5 + const retryDelay = 500 * time.Millisecond + + var header *wire.BlockHeader + var err error + + for i := 0; i < maxRetries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout) + start := time.Now() + header, err = c.client.GetBlockHeader(ctx, hash.String()) + cancel() + + if err == nil { + if dur := time.Since(start); dur > 2*time.Second { + log.Warnf("Slow GetBlockHeader hash=%s took %v", hash.String(), dur) + } + break + } + + if dur := time.Since(start); dur > 2*time.Second { + log.Warnf("Slow GetBlockHeader hash=%s failed after %v: %v", + hash.String(), dur, err) + } + + // If this isn't the last retry, wait before trying again. + if i < maxRetries-1 { + log.Debugf("Block header not found for %s, retrying in %v (attempt %d/%d)", + hash.String(), retryDelay, i+1, maxRetries) + time.Sleep(retryDelay) + } + } + + if err != nil { + return nil, fmt.Errorf("failed to get block header after %d retries: %w", + maxRetries, err) + } + + // Cache the header. + c.headerCacheMtx.Lock() + c.headerCache[*hash] = header + c.headerCacheMtx.Unlock() + + return header, nil +} + +// maybeLogProgress logs periodic progress during long scans. +func (c *ChainClient) maybeLogProgress(height int64) { + const ( + progressEvery = int64(500) + progressInterval = 30 * time.Second + ) + + now := time.Now() + + c.progressMtx.Lock() + defer c.progressMtx.Unlock() + + if c.lastProgressLog.IsZero() { + c.lastProgressLog = now + c.lastProgressHeight = height + return + } + + heightDelta := height - c.lastProgressHeight + timeDelta := now.Sub(c.lastProgressLog) + if heightDelta < 0 { + // Reset baseline if height moves backward (e.g. birthday search). 
+ c.lastProgressLog = now + c.lastProgressHeight = height + return + } + if heightDelta < progressEvery && timeDelta < progressInterval { + return + } + + rate := float64(heightDelta) / timeDelta.Seconds() + log.Infof("Esplora sync progress: height=%d (+%d in %s, %.2f blk/s)", + height, heightDelta, timeDelta.Round(time.Second), rate) + + c.lastProgressLog = now + c.lastProgressHeight = height +} + +// IsCurrent returns true if the chain client believes it is synced with the +// network. +func (c *ChainClient) IsCurrent() bool { + bestHash, _, err := c.GetBestBlock() + if err != nil { + return false + } + + bestHeader, err := c.GetBlockHeader(bestHash) + if err != nil { + return false + } + + // Consider ourselves current if the best block is within 2 hours. + return time.Since(bestHeader.Timestamp) < 2*time.Hour +} + +// filterBlocksAddressThreshold is the number of addresses above which we switch +// from per-address API queries to block-based scanning. Block-based scanning +// fetches each block's transactions and scans them locally, which is much more +// efficient when there are many addresses to check. +const filterBlocksAddressThreshold = 500 + +// FilterBlocks scans the blocks contained in the FilterBlocksRequest for any +// addresses of interest. +func (c *ChainClient) FilterBlocks( + req *chain.FilterBlocksRequest) (*chain.FilterBlocksResponse, error) { + + totalAddrs := len(req.ExternalAddrs) + len(req.InternalAddrs) + + log.Tracef("FilterBlocks called: %d external addrs, %d internal addrs, %d blocks", + len(req.ExternalAddrs), len(req.InternalAddrs), len(req.Blocks)) + + // Use gap limit scanning for large address sets when enabled. + // This is dramatically faster than scanning all addresses. 
+ if c.cfg.UseGapLimit && totalAddrs > filterBlocksAddressThreshold { + log.Infof("FilterBlocks: using gap limit scanning (gap=%d) for %d addresses", + c.cfg.GapLimit, totalAddrs) + return c.filterBlocksWithGapLimit(req) + } + + // Use block-based scanning for large address sets (e.g., during wallet recovery). + // This is much more efficient than querying each address individually. + if totalAddrs > filterBlocksAddressThreshold { + log.Infof("FilterBlocks: using block-based scanning for %d addresses across %d blocks", + totalAddrs, len(req.Blocks)) + return c.filterBlocksByScanning(req) + } + + // For small address sets, use per-address queries. + return c.filterBlocksByAddress(req) +} + +// addressScanResult holds the result of scanning a single address. +type addressScanResult struct { + scopedIdx waddrmgr.ScopedIndex + addr btcutil.Address + txInfos []*TxInfo + err error +} + +// filterBlocksWithGapLimit implements BIP-44 gap limit scanning for wallet recovery. +// Instead of scanning all addresses, it scans incrementally and stops when +// it finds GapLimit consecutive unused addresses per scope/chain. +func (c *ChainClient) filterBlocksWithGapLimit( + req *chain.FilterBlocksRequest) (*chain.FilterBlocksResponse, error) { + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + // Build block height lookup for filtering transactions. + blockHeights := make(map[int32]int) + for i, block := range req.Blocks { + blockHeights[block.Height] = i + } + + var ( + batchIndex uint32 = ^uint32(0) + foundRelevant bool + foundExternalAddrs = make(map[waddrmgr.KeyScope]map[uint32]struct{}) + foundInternalAddrs = make(map[waddrmgr.KeyScope]map[uint32]struct{}) + foundOutPoints = make(map[wire.OutPoint]btcutil.Address) + matchedTxIDs = make(map[string]int) // txid -> blockIdx + ) + + // Process external addresses with gap limit. 
+ extResult := c.scanAddressesWithGapLimit( + ctx, req.ExternalAddrs, blockHeights, true, + ) + for scopedIdx, result := range extResult.foundAddrs { + if foundExternalAddrs[scopedIdx.Scope] == nil { + foundExternalAddrs[scopedIdx.Scope] = make(map[uint32]struct{}) + } + foundExternalAddrs[scopedIdx.Scope][scopedIdx.Index] = struct{}{} + + for op, addr := range result.outpoints { + foundOutPoints[op] = addr + } + for txid, blockIdx := range result.txIDs { + matchedTxIDs[txid] = blockIdx + if !foundRelevant || uint32(blockIdx) < batchIndex { + batchIndex = uint32(blockIdx) + } + foundRelevant = true + } + } + + // Process internal addresses with gap limit. + intResult := c.scanAddressesWithGapLimit( + ctx, req.InternalAddrs, blockHeights, false, + ) + for scopedIdx, result := range intResult.foundAddrs { + if foundInternalAddrs[scopedIdx.Scope] == nil { + foundInternalAddrs[scopedIdx.Scope] = make(map[uint32]struct{}) + } + foundInternalAddrs[scopedIdx.Scope][scopedIdx.Index] = struct{}{} + + for op, addr := range result.outpoints { + foundOutPoints[op] = addr + } + for txid, blockIdx := range result.txIDs { + matchedTxIDs[txid] = blockIdx + if !foundRelevant || uint32(blockIdx) < batchIndex { + batchIndex = uint32(blockIdx) + } + foundRelevant = true + } + } + + // Log summary. + log.Infof("Gap limit scan complete: external scanned=%d found=%d, internal scanned=%d found=%d", + extResult.scannedCount, len(extResult.foundAddrs), + intResult.scannedCount, len(intResult.foundAddrs)) + + if !foundRelevant { + log.Infof("FilterBlocks (gap limit): no relevant transactions found") + return nil, nil + } + + // Fetch raw transactions for matches. 
+ log.Infof("FilterBlocks (gap limit): fetching %d matched raw transactions...", len(matchedTxIDs)) + + relevantTxns := make([]*wire.MsgTx, 0, len(matchedTxIDs)) + for txid := range matchedTxIDs { + tx, err := c.client.GetRawTransactionMsgTx(ctx, txid) + if err != nil { + log.Warnf("FilterBlocks: failed to fetch raw tx %s: %v", txid, err) + continue + } + relevantTxns = append(relevantTxns, tx) + } + + log.Infof("FilterBlocks (gap limit): found %d relevant txns, earliest at block height %d", + len(relevantTxns), req.Blocks[batchIndex].Height) + + return &chain.FilterBlocksResponse{ + BatchIndex: batchIndex, + BlockMeta: req.Blocks[batchIndex], + FoundExternalAddrs: foundExternalAddrs, + FoundInternalAddrs: foundInternalAddrs, + FoundOutPoints: foundOutPoints, + RelevantTxns: relevantTxns, + }, nil +} + +// gapLimitScanResult holds results from gap limit address scanning. +type gapLimitScanResult struct { + scannedCount int + foundAddrs map[waddrmgr.ScopedIndex]*addressFoundResult +} + +// addressFoundResult holds details about a found address. +type addressFoundResult struct { + outpoints map[wire.OutPoint]btcutil.Address + txIDs map[string]int // txid -> blockIdx +} + +// scanAddressesWithGapLimit scans addresses using BIP-44 gap limit logic. +// It groups addresses by scope/chain, scans in index order, and stops +// when GapLimit consecutive unused addresses are found. +func (c *ChainClient) scanAddressesWithGapLimit( + ctx context.Context, + addrs map[waddrmgr.ScopedIndex]btcutil.Address, + blockHeights map[int32]int, + isExternal bool) *gapLimitScanResult { + + result := &gapLimitScanResult{ + foundAddrs: make(map[waddrmgr.ScopedIndex]*addressFoundResult), + } + + if len(addrs) == 0 { + return result + } + + // Group addresses by KeyScope for gap limit tracking. + // Within each scope, we track the gap separately. 
+ type scopeGroup struct { + indices []uint32 + addrs map[uint32]waddrmgr.ScopedIndex + } + scopeGroups := make(map[waddrmgr.KeyScope]*scopeGroup) + + for scopedIdx := range addrs { + scope := scopedIdx.Scope + if scopeGroups[scope] == nil { + scopeGroups[scope] = &scopeGroup{ + addrs: make(map[uint32]waddrmgr.ScopedIndex), + } + } + scopeGroups[scope].indices = append(scopeGroups[scope].indices, scopedIdx.Index) + scopeGroups[scope].addrs[scopedIdx.Index] = scopedIdx + } + + // Sort indices within each scope. + for _, group := range scopeGroups { + sortUint32Slice(group.indices) + } + + chainType := "external" + if !isExternal { + chainType = "internal" + } + + // Process each scope with gap limit. + for scope, group := range scopeGroups { + highestUsedIdx := -1 + consecutiveUnused := 0 + scannedInScope := 0 + + log.Debugf("Gap limit scan: scope=%v chain=%s, %d addresses to check", + scope, chainType, len(group.indices)) + + // Process addresses in batches for efficiency. + batchSize := c.cfg.AddressBatchSize + for i := 0; i < len(group.indices); i += batchSize { + // Check if we've hit the gap limit. + if consecutiveUnused >= c.cfg.GapLimit { + log.Debugf("Gap limit reached for scope=%v chain=%s after %d addresses (highest used: %d)", + scope, chainType, scannedInScope, highestUsedIdx) + break + } + + // Prepare batch. + end := i + batchSize + if end > len(group.indices) { + end = len(group.indices) + } + batchIndices := group.indices[i:end] + + // Query addresses in parallel. 
+ resultsChan := make(chan addressScanResult, len(batchIndices)) + var wg sync.WaitGroup + + for _, idx := range batchIndices { + wg.Add(1) + go func(index uint32) { + defer wg.Done() + + scopedIdx := group.addrs[index] + addr := addrs[scopedIdx] + + txInfos, err := c.client.GetAddressTxs(ctx, addr.EncodeAddress()) + resultsChan <- addressScanResult{ + scopedIdx: scopedIdx, + addr: addr, + txInfos: txInfos, + err: err, + } + }(idx) + } + + wg.Wait() + close(resultsChan) + + // Process results in index order. + batchResults := make(map[uint32]addressScanResult) + for res := range resultsChan { + batchResults[res.scopedIdx.Index] = res + } + + for _, idx := range batchIndices { + scannedInScope++ + result.scannedCount++ + + res, ok := batchResults[idx] + if !ok || res.err != nil { + consecutiveUnused++ + continue + } + + // Filter transactions to those in our block range. + hasRelevantTx := false + addrResult := &addressFoundResult{ + outpoints: make(map[wire.OutPoint]btcutil.Address), + txIDs: make(map[string]int), + } + + for _, txInfo := range res.txInfos { + if !txInfo.Status.Confirmed { + continue + } + + blockIdx, inRange := blockHeights[int32(txInfo.Status.BlockHeight)] + if !inRange { + continue + } + + hasRelevantTx = true + addrResult.txIDs[txInfo.TxID] = blockIdx + + // Record outpoints for this address. 
+ txHash, err := chainhash.NewHashFromStr(txInfo.TxID) + if err != nil { + continue + } + for i, vout := range txInfo.Vout { + if vout.ScriptPubKeyAddr == res.addr.EncodeAddress() { + op := wire.OutPoint{Hash: *txHash, Index: uint32(i)} + addrResult.outpoints[op] = res.addr + } + } + } + + if hasRelevantTx { + result.foundAddrs[res.scopedIdx] = addrResult + highestUsedIdx = int(idx) + consecutiveUnused = 0 + + log.Debugf("Gap limit scan: found activity at scope=%v chain=%s index=%d", + scope, chainType, idx) + } else { + consecutiveUnused++ + } + } + } + + log.Infof("Gap limit scan complete: scope=%v chain=%s scanned=%d found=%d", + scope, chainType, scannedInScope, len(result.foundAddrs)) + } + + return result +} + +// sortUint32Slice sorts a slice of uint32 in ascending order. +func sortUint32Slice(s []uint32) { + slices.Sort(s) +} + +// filterBlocksByAddress filters blocks by querying each address individually. +// This is efficient for small address sets but slow for large ones. +func (c *ChainClient) filterBlocksByAddress( + req *chain.FilterBlocksRequest) (*chain.FilterBlocksResponse, error) { + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + var ( + relevantTxns []*wire.MsgTx + batchIndex uint32 + foundRelevant bool + foundExternalAddrs = make(map[waddrmgr.KeyScope]map[uint32]struct{}) + foundInternalAddrs = make(map[waddrmgr.KeyScope]map[uint32]struct{}) + foundOutPoints = make(map[wire.OutPoint]btcutil.Address) + seenTxs = make(map[chainhash.Hash]struct{}) + ) + + // Helper to process address matches and update results. 
+ processAddressMatch := func(scopedIdx waddrmgr.ScopedIndex, addr btcutil.Address, + txns []*wire.MsgTx, idx uint32, isExternal bool) { + + for _, tx := range txns { + txHash := tx.TxHash() + if _, seen := seenTxs[txHash]; !seen { + relevantTxns = append(relevantTxns, tx) + seenTxs[txHash] = struct{}{} + } + } + + if !foundRelevant || idx < batchIndex { + batchIndex = idx + } + foundRelevant = true + + // Record found address. + if isExternal { + if foundExternalAddrs[scopedIdx.Scope] == nil { + foundExternalAddrs[scopedIdx.Scope] = make(map[uint32]struct{}) + } + foundExternalAddrs[scopedIdx.Scope][scopedIdx.Index] = struct{}{} + } else { + if foundInternalAddrs[scopedIdx.Scope] == nil { + foundInternalAddrs[scopedIdx.Scope] = make(map[uint32]struct{}) + } + foundInternalAddrs[scopedIdx.Scope][scopedIdx.Index] = struct{}{} + } + + // Record outpoints for outputs matching this address. + for _, tx := range txns { + for i, txOut := range tx.TxOut { + _, addrs, _, err := txscript.ExtractPkScriptAddrs( + txOut.PkScript, c.chainParams, + ) + if err != nil { + continue + } + for _, a := range addrs { + if a.EncodeAddress() == addr.EncodeAddress() { + op := wire.OutPoint{ + Hash: tx.TxHash(), + Index: uint32(i), + } + foundOutPoints[op] = addr + } + } + } + } + } + + // Check each watched external address. + for scopedIdx, addr := range req.ExternalAddrs { + txns, idx, err := c.filterAddressInBlocks(ctx, addr, req.Blocks) + if err != nil { + log.Warnf("Failed to filter address %s: %v", addr, err) + continue + } + if len(txns) > 0 { + processAddressMatch(scopedIdx, addr, txns, idx, true) + log.Tracef("FilterBlocks: found %d txs for external addr %s (scope=%v, index=%d)", + len(txns), addr.EncodeAddress(), scopedIdx.Scope, scopedIdx.Index) + } + } + + // Check each watched internal address. 
+ for scopedIdx, addr := range req.InternalAddrs { + txns, idx, err := c.filterAddressInBlocks(ctx, addr, req.Blocks) + if err != nil { + log.Warnf("Failed to filter address %s: %v", addr, err) + continue + } + if len(txns) > 0 { + processAddressMatch(scopedIdx, addr, txns, idx, false) + log.Tracef("FilterBlocks: found %d txs for internal addr %s (scope=%v, index=%d)", + len(txns), addr.EncodeAddress(), scopedIdx.Scope, scopedIdx.Index) + } + } + + // Check watched outpoints for spends. + for outpoint, addr := range req.WatchedOutPoints { + txns, idx, err := c.filterOutpointSpend(ctx, outpoint, req.Blocks) + if err != nil { + log.Warnf("Failed to check outpoint %v: %v", outpoint, err) + continue + } + if len(txns) > 0 { + for _, tx := range txns { + txHash := tx.TxHash() + if _, seen := seenTxs[txHash]; !seen { + relevantTxns = append(relevantTxns, tx) + seenTxs[txHash] = struct{}{} + } + } + if !foundRelevant || idx < batchIndex { + batchIndex = idx + } + foundRelevant = true + log.Debugf("FilterBlocks: found spend of outpoint %v (addr=%s)", + outpoint, addr.EncodeAddress()) + } + } + + if !foundRelevant { + return nil, nil + } + + log.Debugf("FilterBlocks: found %d relevant txns at block height %d", + len(relevantTxns), req.Blocks[batchIndex].Height) + + return &chain.FilterBlocksResponse{ + BatchIndex: batchIndex, + BlockMeta: req.Blocks[batchIndex], + FoundExternalAddrs: foundExternalAddrs, + FoundInternalAddrs: foundInternalAddrs, + FoundOutPoints: foundOutPoints, + RelevantTxns: relevantTxns, + }, nil +} + +// filterOutpointSpend checks if an outpoint was spent in any of the given blocks. +func (c *ChainClient) filterOutpointSpend(ctx context.Context, + outpoint wire.OutPoint, + blocks []wtxmgr.BlockMeta) ([]*wire.MsgTx, uint32, error) { + + // Check if the outpoint has been spent. 
+	outSpend, err := c.client.GetTxOutSpend(ctx, outpoint.Hash.String(), outpoint.Index)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Unspent outputs, and outputs whose spend is still unconfirmed, are
+	// not relevant to a confirmed-block filter pass.
+	if !outSpend.Spent || !outSpend.Status.Confirmed {
+		return nil, 0, nil
+	}
+
+	// Check if the spending tx is in one of our blocks.
+	blockHeights := make(map[int32]int)
+	for i, block := range blocks {
+		blockHeights[block.Height] = i
+	}
+
+	if idx, ok := blockHeights[int32(outSpend.Status.BlockHeight)]; ok {
+		tx, err := c.client.GetRawTransactionMsgTx(ctx, outSpend.TxID)
+		if err != nil {
+			return nil, 0, err
+		}
+		return []*wire.MsgTx{tx}, uint32(idx), nil
+	}
+
+	// The spend confirmed at a height outside the requested batch.
+	return nil, 0, nil
+}
+
+// maxConcurrentBlockFetches is the maximum number of concurrent block fetches.
+// Higher parallelism significantly improves scanning speed over network.
+const maxConcurrentBlockFetches = 20
+
+// filterBlocksByScanning filters blocks by fetching each block's transactions
+// and scanning them locally against the watched address set. This is much more
+// efficient than per-address queries when there are many addresses.
+//
+// It returns (nil, nil) when none of the requested blocks contain relevant
+// transactions; otherwise the response points at the EARLIEST matching block
+// (BatchIndex/BlockMeta) and carries all matched raw transactions.
+func (c *ChainClient) filterBlocksByScanning(
+	req *chain.FilterBlocksRequest) (*chain.FilterBlocksResponse, error) {
+
+	// Use a longer timeout for block scanning since we may need to fetch many blocks.
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
+	defer cancel()
+
+	// Build address lookup maps for O(1) matching.
+	// Map from address string to ScopedIndex for quick lookup.
+	externalAddrMap := make(map[string]waddrmgr.ScopedIndex)
+	for scopedIdx, addr := range req.ExternalAddrs {
+		externalAddrMap[addr.EncodeAddress()] = scopedIdx
+	}
+
+	internalAddrMap := make(map[string]waddrmgr.ScopedIndex)
+	for scopedIdx, addr := range req.InternalAddrs {
+		internalAddrMap[addr.EncodeAddress()] = scopedIdx
+	}
+
+	// Pre-fetch all block transaction info in parallel using /block/:hash/txs
+	// which returns addresses directly - much more efficient than fetching
+	// txids then individual raw transactions.
+	type blockTxsResult struct {
+		blockIdx int
+		txInfos  []*TxInfo
+		err      error
+	}
+
+	log.Infof("FilterBlocks: pre-fetching transaction info for %d blocks...", len(req.Blocks))
+
+	// The channel is buffered to the full block count so producer goroutines
+	// can never block on send, even if the collector below falls behind.
+	blockTxsChan := make(chan blockTxsResult, len(req.Blocks))
+	blockSemaphore := make(chan struct{}, maxConcurrentBlockFetches)
+
+	var fetchWg sync.WaitGroup
+	for i, blockMeta := range req.Blocks {
+		fetchWg.Add(1)
+		go func(idx int, meta wtxmgr.BlockMeta) {
+			defer fetchWg.Done()
+
+			// Acquire semaphore.
+			select {
+			case blockSemaphore <- struct{}{}:
+				defer func() { <-blockSemaphore }()
+			case <-ctx.Done():
+				blockTxsChan <- blockTxsResult{blockIdx: idx, err: ctx.Err()}
+				return
+			}
+
+			// Use GetBlockTxs which returns addresses directly - single API call per block.
+			txInfos, err := c.client.GetBlockTxs(ctx, meta.Hash.String())
+			blockTxsChan <- blockTxsResult{blockIdx: idx, txInfos: txInfos, err: err}
+		}(i, blockMeta)
+	}
+
+	go func() {
+		fetchWg.Wait()
+		close(blockTxsChan)
+	}()
+
+	// Collect all block transaction info.
+	// NOTE(review): a failed block fetch is logged and skipped, so a
+	// transient API error can silently drop matches in that block for this
+	// pass - confirm callers retry/rescan on such gaps.
+	allBlockTxInfos := make(map[int][]*TxInfo)
+	for result := range blockTxsChan {
+		if result.err != nil {
+			log.Warnf("FilterBlocks: failed to get transactions for block %d: %v",
+				result.blockIdx, result.err)
+			continue
+		}
+		allBlockTxInfos[result.blockIdx] = result.txInfos
+	}
+
+	log.Infof("FilterBlocks: finished fetching, scanning %d blocks...", len(allBlockTxInfos))
+
+	var (
+		batchIndex         uint32
+		foundRelevant      bool
+		foundExternalAddrs = make(map[waddrmgr.KeyScope]map[uint32]struct{})
+		foundInternalAddrs = make(map[waddrmgr.KeyScope]map[uint32]struct{})
+		foundOutPoints     = make(map[wire.OutPoint]btcutil.Address)
+		matchedTxIDs       = make(map[string]int) // txid -> blockIdx
+	)
+
+	// Process blocks sequentially (order matters for finding earliest match).
+	// This is fast because we're just doing hash map lookups on addresses
+	// returned directly from the API - no script parsing needed.
+	for blockIdx, blockMeta := range req.Blocks {
+		txInfos, ok := allBlockTxInfos[blockIdx]
+		if !ok {
+			continue
+		}
+
+		// Scan each transaction for watched addresses and spent outpoints.
+		for _, txInfo := range txInfos {
+			txIsRelevant := false
+
+			// First, check inputs to see if they spend any watched outpoints.
+			for _, vin := range txInfo.Vin {
+				if vin.IsCoinbase {
+					continue
+				}
+				prevOutpoint := wire.OutPoint{Index: vin.Vout}
+				if hash, err := chainhash.NewHashFromStr(vin.TxID); err == nil {
+					prevOutpoint.Hash = *hash
+				} else {
+					// Unparseable prevout txid - skip this input.
+					continue
+				}
+
+				// Check if this input spends a watched outpoint.
+				if addr, ok := req.WatchedOutPoints[prevOutpoint]; ok {
+					txIsRelevant = true
+					log.Debugf("FilterBlocks: found spend of watched outpoint %v (addr=%s) in block %d",
+						prevOutpoint, addr.EncodeAddress(), blockMeta.Height)
+				}
+				// Check if this input spends an outpoint we found in this scan.
+				// This works because blocks (and txs within them) are walked in
+				// order, so funding outputs are recorded before their spends.
+				if addr, ok := foundOutPoints[prevOutpoint]; ok {
+					txIsRelevant = true
+					log.Debugf("FilterBlocks: found spend of found outpoint %v (addr=%s) in block %d",
+						prevOutpoint, addr.EncodeAddress(), blockMeta.Height)
+				}
+			}
+
+			// Check outputs for watched addresses - addresses come directly from API!
+			txHash, err := chainhash.NewHashFromStr(txInfo.TxID)
+			if err != nil {
+				continue
+			}
+
+			for i, vout := range txInfo.Vout {
+				addrStr := vout.ScriptPubKeyAddr
+				if addrStr == "" {
+					// Non-standard / non-address output (e.g. OP_RETURN).
+					continue
+				}
+
+				// Check external addresses.
+				if scopedIdx, ok := externalAddrMap[addrStr]; ok {
+					txIsRelevant = true
+
+					if foundExternalAddrs[scopedIdx.Scope] == nil {
+						foundExternalAddrs[scopedIdx.Scope] = make(map[uint32]struct{})
+					}
+					foundExternalAddrs[scopedIdx.Scope][scopedIdx.Index] = struct{}{}
+
+					op := wire.OutPoint{Hash: *txHash, Index: uint32(i)}
+					foundOutPoints[op] = req.ExternalAddrs[scopedIdx]
+
+					log.Debugf("FilterBlocks: found output for external addr %s (scope=%v, index=%d) in block %d, value=%d",
+						addrStr, scopedIdx.Scope, scopedIdx.Index, blockMeta.Height, vout.Value)
+				}
+
+				// Check internal addresses.
+				if scopedIdx, ok := internalAddrMap[addrStr]; ok {
+					txIsRelevant = true
+
+					if foundInternalAddrs[scopedIdx.Scope] == nil {
+						foundInternalAddrs[scopedIdx.Scope] = make(map[uint32]struct{})
+					}
+					foundInternalAddrs[scopedIdx.Scope][scopedIdx.Index] = struct{}{}
+
+					op := wire.OutPoint{Hash: *txHash, Index: uint32(i)}
+					foundOutPoints[op] = req.InternalAddrs[scopedIdx]
+
+					log.Debugf("FilterBlocks: found output for internal addr %s (scope=%v, index=%d) in block %d, value=%d",
+						addrStr, scopedIdx.Scope, scopedIdx.Index, blockMeta.Height, vout.Value)
+				}
+			}
+
+			// Record matched transactions for later raw tx fetch.
+			if txIsRelevant {
+				if _, exists := matchedTxIDs[txInfo.TxID]; !exists {
+					matchedTxIDs[txInfo.TxID] = blockIdx
+				}
+
+				if !foundRelevant || uint32(blockIdx) < batchIndex {
+					batchIndex = uint32(blockIdx)
+				}
+				foundRelevant = true
+			}
+		}
+
+		// Log progress every 50 blocks.
+		if (blockIdx+1)%50 == 0 || blockIdx == len(req.Blocks)-1 {
+			log.Infof("FilterBlocks: scanned %d/%d blocks, found %d relevant txns",
+				blockIdx+1, len(req.Blocks), len(matchedTxIDs))
+		}
+	}
+
+	if !foundRelevant {
+		log.Infof("FilterBlocks: no relevant transactions found in %d blocks",
+			len(req.Blocks))
+		return nil, nil
+	}
+
+	// Now fetch only the raw transactions that matched - typically just a few.
+	log.Infof("FilterBlocks: fetching %d matched raw transactions...", len(matchedTxIDs))
+
+	relevantTxns := make([]*wire.MsgTx, 0, len(matchedTxIDs))
+	for txid := range matchedTxIDs {
+		tx, err := c.client.GetRawTransactionMsgTx(ctx, txid)
+		if err != nil {
+			log.Warnf("FilterBlocks: failed to fetch raw tx %s: %v", txid, err)
+			continue
+		}
+		relevantTxns = append(relevantTxns, tx)
+	}
+
+	log.Infof("FilterBlocks: found %d relevant txns, earliest at block height %d",
+		len(relevantTxns), req.Blocks[batchIndex].Height)
+
+	return &chain.FilterBlocksResponse{
+		BatchIndex:         batchIndex,
+		BlockMeta:          req.Blocks[batchIndex],
+		FoundExternalAddrs: foundExternalAddrs,
+		FoundInternalAddrs: foundInternalAddrs,
+		FoundOutPoints:     foundOutPoints,
+		RelevantTxns:       relevantTxns,
+	}, nil
+}
+
+// filterAddressInBlocks checks if an address has any activity in the given blocks.
+func (c *ChainClient) filterAddressInBlocks(ctx context.Context,
+	addr btcutil.Address,
+	blocks []wtxmgr.BlockMeta) ([]*wire.MsgTx, uint32, error) {
+
+	addrStr := addr.EncodeAddress()
+
+	txs, err := c.client.GetAddressTxs(ctx, addrStr)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// batchIdx starts at the maximum value; it is only meaningful when
+	// relevantTxns is non-empty, which is what callers check first.
+	var (
+		relevantTxns []*wire.MsgTx
+		batchIdx     uint32 = ^uint32(0)
+	)
+
+	// Build a map of block heights for quick lookup.
+	blockHeights := make(map[int32]int)
+	for i, block := range blocks {
+		blockHeights[block.Height] = i
+	}
+
+	for _, txInfo := range txs {
+		if !txInfo.Status.Confirmed {
+			continue
+		}
+
+		// Check if this height falls within any of our blocks.
+		if idx, ok := blockHeights[int32(txInfo.Status.BlockHeight)]; ok {
+			// Fetch the full transaction.
+			tx, err := c.client.GetRawTransactionMsgTx(ctx, txInfo.TxID)
+			if err != nil {
+				continue
+			}
+
+			relevantTxns = append(relevantTxns, tx)
+			if uint32(idx) < batchIdx {
+				batchIdx = uint32(idx)
+			}
+		}
+	}
+
+	return relevantTxns, batchIdx, nil
+}
+
+// BlockStamp returns the latest block notified by the client.
+func (c *ChainClient) BlockStamp() (*waddrmgr.BlockStamp, error) {
+	c.bestBlockMtx.RLock()
+	defer c.bestBlockMtx.RUnlock()
+
+	// Return a copy rather than &c.bestBlock: the field is overwritten by
+	// handleNewBlock/processBlockAtHeight after the read lock is released,
+	// so handing out a pointer into the struct would let callers observe
+	// torn/racing updates.
+	stamp := c.bestBlock
+	return &stamp, nil
+}
+
+// SendRawTransaction submits the encoded transaction to the server.
+func (c *ChainClient) SendRawTransaction(tx *wire.MsgTx,
+	allowHighFees bool) (*chainhash.Hash, error) {
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
+	defer cancel()
+
+	// allowHighFees has no equivalent in the Esplora broadcast endpoint and
+	// is intentionally ignored.
+	return c.client.BroadcastTx(ctx, tx)
+}
+
+// GetUtxo returns the transaction output identified by the given outpoint.
+//
+// The returned TxOut echoes the caller-supplied pkScript; only the value is
+// taken from the chain. ErrOutputSpent is returned for any known spend
+// (including an unconfirmed one, as reported by the outspend endpoint).
+func (c *ChainClient) GetUtxo(op *wire.OutPoint, pkScript []byte,
+	heightHint uint32, cancel <-chan struct{}) (*wire.TxOut, error) {
+
+	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
+	defer ctxCancel()
+
+	// Check if the output is spent.
+	outSpend, err := c.client.GetTxOutSpend(ctx, op.Hash.String(), op.Index)
+	if err != nil {
+		return nil, fmt.Errorf("failed to check output spend status: %w", err)
+	}
+
+	if outSpend.Spent {
+		return nil, ErrOutputSpent
+	}
+
+	// Fetch the transaction to get the output value.
+	tx, err := c.client.GetTransaction(ctx, op.Hash.String())
+	if err != nil {
+		return nil, fmt.Errorf("failed to get transaction: %w", err)
+	}
+
+	if int(op.Index) >= len(tx.Vout) {
+		return nil, ErrOutputNotFound
+	}
+
+	vout := tx.Vout[op.Index]
+
+	return &wire.TxOut{
+		Value:    vout.Value,
+		PkScript: pkScript,
+	}, nil
+}
+
+// Rescan rescans from the specified height for addresses.
+//
+// The scan itself runs synchronously; the addresses and outpoints are then
+// added to the watch lists so future blocks are monitored, and a
+// RescanFinished notification is emitted for the current best block.
+func (c *ChainClient) Rescan(blockHash *chainhash.Hash, addrs []btcutil.Address,
+	outpoints map[wire.OutPoint]btcutil.Address) error {
+
+	log.Infof("Rescan called for %d addresses, %d outpoints",
+		len(addrs), len(outpoints))
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+	defer cancel()
+
+	// Get the starting height.
+	var startHeight int32
+	if blockHash != nil {
+		// Find height from hash. The heightToHash map is guarded by
+		// heightToHashMtx everywhere else (cacheHeader, handleNewBlock);
+		// using headerCacheMtx here was a data race.
+		c.heightToHashMtx.RLock()
+		for h, cachedHash := range c.heightToHash {
+			if cachedHash.IsEqual(blockHash) {
+				startHeight = h
+				break
+			}
+		}
+		c.heightToHashMtx.RUnlock()
+	}
+
+	// Scan each address for historical transactions. Per-address failures
+	// are logged but do not abort the rescan of the remaining addresses.
+	for _, addr := range addrs {
+		if err := c.scanAddressHistory(ctx, addr, startHeight); err != nil {
+			log.Warnf("Failed to scan address %s: %v", addr, err)
+		}
+	}
+
+	// Add addresses to watch list for future monitoring.
+	c.watchedAddrsMtx.Lock()
+	for _, addr := range addrs {
+		c.watchedAddrs[addr.EncodeAddress()] = addr
+	}
+	c.watchedAddrsMtx.Unlock()
+
+	// Add outpoints to watch list.
+	c.watchedOutpointsMtx.Lock()
+	for op, addr := range outpoints {
+		c.watchedOutpoints[op] = addr
+	}
+	c.watchedOutpointsMtx.Unlock()
+
+	// Send rescan finished notification.
+	c.bestBlockMtx.RLock()
+	bestBlock := c.bestBlock
+	c.bestBlockMtx.RUnlock()
+
+	c.notificationChan <- &chain.RescanFinished{
+		Hash:   &bestBlock.Hash,
+		Height: bestBlock.Height,
+		Time:   bestBlock.Timestamp,
+	}
+
+	return nil
+}
+
+// scanAddressHistory scans an address for historical transactions at or above
+// startHeight and emits a RelevantTx notification for each confirmed match,
+// ordered oldest-first.
+func (c *ChainClient) scanAddressHistory(ctx context.Context,
+	addr btcutil.Address, startHeight int32) error {
+
+	addrStr := addr.EncodeAddress()
+
+	txs, err := c.client.GetAddressTxs(ctx, addrStr)
+	if err != nil {
+		return fmt.Errorf("failed to get address history: %w", err)
+	}
+
+	log.Debugf("Found %d transactions for address %s", len(txs), addrStr)
+
+	// Filter and collect confirmed transactions above startHeight.
+	var confirmedTxs []*TxInfo
+	for _, txInfo := range txs {
+		if !txInfo.Status.Confirmed {
+			continue
+		}
+		if int32(txInfo.Status.BlockHeight) < startHeight {
+			continue
+		}
+		confirmedTxs = append(confirmedTxs, txInfo)
+	}
+
+	// Sort transactions by block height (oldest first).
+	// This is critical for proper UTXO tracking - the wallet must see
+	// funding transactions before spending transactions.
+	sortTxInfoByHeight(confirmedTxs)
+
+	log.Debugf("Processing %d confirmed transactions for address %s (sorted by height)",
+		len(confirmedTxs), addrStr)
+
+	for _, txInfo := range confirmedTxs {
+		// Fetch the full transaction.
+		tx, err := c.client.GetRawTransactionMsgTx(ctx, txInfo.TxID)
+		if err != nil {
+			log.Warnf("Failed to fetch tx %s: %v", txInfo.TxID, err)
+			continue
+		}
+
+		blockHash, err := chainhash.NewHashFromStr(txInfo.Status.BlockHash)
+		if err != nil {
+			continue
+		}
+
+		log.Tracef("Sending tx %s at height %d for address %s",
+			txInfo.TxID, txInfo.Status.BlockHeight, addrStr)
+
+		// Send relevant transaction notification.
+		c.notificationChan <- chain.RelevantTx{
+			TxRecord: &wtxmgr.TxRecord{
+				MsgTx:        *tx,
+				Hash:         tx.TxHash(),
+				Received:     time.Unix(txInfo.Status.BlockTime, 0),
+				SerializedTx: nil,
+			},
+			Block: &wtxmgr.BlockMeta{
+				Block: wtxmgr.Block{
+					Hash:   *blockHash,
+					Height: int32(txInfo.Status.BlockHeight),
+				},
+				Time: time.Unix(txInfo.Status.BlockTime, 0),
+			},
+		}
+	}
+
+	return nil
+}
+
+// sortTxInfoByHeight sorts transactions by block height in ascending order
+// (oldest first). For transactions in the same block, sort by txid for
+// deterministic ordering.
+func sortTxInfoByHeight(txs []*TxInfo) {
+	slices.SortFunc(txs, func(a, b *TxInfo) int {
+		if a.Status.BlockHeight != b.Status.BlockHeight {
+			return cmp.Compare(a.Status.BlockHeight, b.Status.BlockHeight)
+		}
+		return cmp.Compare(a.TxID, b.TxID)
+	})
+}
+
+// NotifyReceived marks an address for transaction notifications.
+//
+// Existing history for the addresses is delivered asynchronously from a
+// background goroutine; this call returns immediately.
+func (c *ChainClient) NotifyReceived(addrs []btcutil.Address) error {
+	log.Infof("NotifyReceived called with %d addresses", len(addrs))
+
+	c.watchedAddrsMtx.Lock()
+	for _, addr := range addrs {
+		c.watchedAddrs[addr.EncodeAddress()] = addr
+	}
+	c.watchedAddrsMtx.Unlock()
+
+	// Scan addresses for existing transactions in the background.
+	// NOTE(review): this goroutine is not tracked by c.wg - confirm it is
+	// acceptable for it to outlive a client shutdown (it self-terminates via
+	// the 5 minute timeout).
+	go func() {
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+		defer cancel()
+
+		for _, addr := range addrs {
+			if err := c.scanAddressForExistingTxs(ctx, addr); err != nil {
+				log.Debugf("Error scanning address %s: %v",
+					addr.EncodeAddress(), err)
+			}
+		}
+	}()
+
+	return nil
+}
+
+// scanAddressForExistingTxs scans an address for existing transactions.
+func (c *ChainClient) scanAddressForExistingTxs(ctx context.Context,
+	addr btcutil.Address) error {
+
+	addrStr := addr.EncodeAddress()
+
+	txs, err := c.client.GetAddressTxs(ctx, addrStr)
+	if err != nil {
+		return fmt.Errorf("failed to get address transactions: %w", err)
+	}
+
+	if len(txs) == 0 {
+		return nil
+	}
+
+	log.Debugf("Found %d existing transactions for address %s",
+		len(txs), addrStr)
+
+	for _, txInfo := range txs {
+		// Fetch the full transaction.
+		tx, err := c.client.GetRawTransactionMsgTx(ctx, txInfo.TxID)
+		if err != nil {
+			log.Warnf("Failed to fetch tx %s: %v", txInfo.TxID, err)
+			continue
+		}
+
+		rec := &wtxmgr.TxRecord{
+			MsgTx:    *tx,
+			Hash:     tx.TxHash(),
+			Received: time.Now(),
+		}
+
+		// Unconfirmed transactions are delivered with a nil BlockMeta.
+		var blockMeta *wtxmgr.BlockMeta
+		if txInfo.Status.Confirmed {
+			blockHash, err := chainhash.NewHashFromStr(txInfo.Status.BlockHash)
+			if err == nil {
+				blockMeta = &wtxmgr.BlockMeta{
+					Block: wtxmgr.Block{
+						Hash:   *blockHash,
+						Height: int32(txInfo.Status.BlockHeight),
+					},
+					Time: time.Unix(txInfo.Status.BlockTime, 0),
+				}
+			}
+		}
+
+		c.notificationChan <- chain.RelevantTx{
+			TxRecord: rec,
+			Block:    blockMeta,
+		}
+	}
+
+	return nil
+}
+
+// NotifyBlocks enables block notifications.
+func (c *ChainClient) NotifyBlocks() error {
+	c.notifyBlocks.Store(true)
+	return nil
+}
+
+// Notifications returns a channel of notifications from the chain client.
+func (c *ChainClient) Notifications() <-chan interface{} {
+	return c.notificationChan
+}
+
+// BackEnd returns the name of the driver.
+func (c *ChainClient) BackEnd() string {
+	return esploraBackendName
+}
+
+// TestMempoolAccept is not supported by Esplora.
+func (c *ChainClient) TestMempoolAccept(txns []*wire.MsgTx,
+	maxFeeRate float64) ([]*btcjson.TestMempoolAcceptResult, error) {
+
+	// Esplora doesn't support mempool acceptance testing.
+	// Return ErrBackendVersion to trigger the fallback to direct publish.
+	return nil, rpcclient.ErrBackendVersion
+}
+
+// MapRPCErr maps errors from the RPC client to equivalent errors in the
+// btcjson package. For the Esplora backend this is the identity mapping.
+func (c *ChainClient) MapRPCErr(err error) error {
+	return err
+}
+
+// notificationHandler processes block notifications and dispatches them.
+// It runs as a single goroutine, so handleNewBlock is always invoked
+// sequentially and needs no internal ordering protection against itself.
+func (c *ChainClient) notificationHandler() {
+	defer c.wg.Done()
+
+	blockNotifs, subID := c.client.Subscribe()
+	// NOTE(review): subscriptionID is written here without a lock - confirm
+	// no other goroutine reads it concurrently.
+	c.subscriptionID = subID
+
+	defer c.client.Unsubscribe(subID)
+
+	for {
+		select {
+		case <-c.quit:
+			return
+
+		case blockInfo, ok := <-blockNotifs:
+			if !ok {
+				return
+			}
+			c.handleNewBlock(blockInfo)
+		}
+	}
+}
+
+// handleNewBlock processes a new block notification.
+// It ensures all blocks are processed sequentially by fetching any missing
+// intermediate blocks before processing the new one.
+func (c *ChainClient) handleNewBlock(blockInfo *BlockInfo) {
+	newHeight := int32(blockInfo.Height)
+
+	// Get the last processed height.
+	c.bestBlockMtx.RLock()
+	lastHeight := c.lastProcessedHeight
+	c.bestBlockMtx.RUnlock()
+
+	// If we're behind, we need to catch up by processing each block sequentially.
+	// This ensures btcwallet receives all blocks in order.
+	if newHeight > lastHeight+1 {
+		log.Debugf("Catching up from height %d to %d", lastHeight+1, newHeight)
+		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+		defer cancel()
+
+		for h := lastHeight + 1; h < newHeight; h++ {
+			if err := c.processBlockAtHeight(ctx, h); err != nil {
+				log.Errorf("Failed to process block at height %d: %v", h, err)
+				// Continue anyway - the next poll will try again
+				return
+			}
+		}
+	}
+
+	// Now process the actual block we received.
+	hash, err := chainhash.NewHashFromStr(blockInfo.ID)
+	if err != nil {
+		log.Errorf("Failed to parse block hash: %v", err)
+		return
+	}
+
+	// Update best block and last processed height.
+	c.bestBlockMtx.Lock()
+	c.bestBlock = waddrmgr.BlockStamp{
+		Height:    newHeight,
+		Hash:      *hash,
+		Timestamp: time.Unix(blockInfo.Timestamp, 0),
+	}
+	c.lastProcessedHeight = newHeight
+	c.bestBlockMtx.Unlock()
+
+	// Cache height to hash mapping.
+	c.heightToHashMtx.Lock()
+	c.heightToHash[newHeight] = hash
+	c.heightToHashMtx.Unlock()
+
+	log.Debugf("New block: height=%d hash=%s", blockInfo.Height, blockInfo.ID)
+
+	// Send block connected notification if enabled.
+	if c.notifyBlocks.Load() {
+		c.notificationChan <- chain.BlockConnected{
+			Block: wtxmgr.Block{
+				Hash:   *hash,
+				Height: newHeight,
+			},
+			Time: time.Unix(blockInfo.Timestamp, 0),
+		}
+	}
+
+	// Check watched addresses for new activity.
+	c.checkWatchedAddresses(newHeight)
+}
+
+// processBlockAtHeight fetches and processes a block at the given height.
+// It mirrors handleNewBlock for intermediate (catch-up) blocks: updates the
+// best-block state, caches the header, and emits BlockConnected.
+func (c *ChainClient) processBlockAtHeight(ctx context.Context, height int32) error {
+	hashStr, err := c.client.GetBlockHashByHeight(ctx, int64(height))
+	if err != nil {
+		return fmt.Errorf("failed to get block hash: %w", err)
+	}
+
+	hash, err := chainhash.NewHashFromStr(hashStr)
+	if err != nil {
+		return fmt.Errorf("failed to parse block hash: %w", err)
+	}
+
+	header, err := c.client.GetBlockHeader(ctx, hashStr)
+	if err != nil {
+		return fmt.Errorf("failed to get block header: %w", err)
+	}
+
+	// Update state.
+	c.bestBlockMtx.Lock()
+	c.bestBlock = waddrmgr.BlockStamp{
+		Height:    height,
+		Hash:      *hash,
+		Timestamp: header.Timestamp,
+	}
+	c.lastProcessedHeight = height
+	c.bestBlockMtx.Unlock()
+
+	// Cache the header and height mapping.
+	c.cacheHeader(height, hash, header)
+
+	log.Debugf("Processed intermediate block: height=%d hash=%s", height, hashStr)
+
+	// Send block connected notification if enabled.
+	if c.notifyBlocks.Load() {
+		c.notificationChan <- chain.BlockConnected{
+			Block: wtxmgr.Block{
+				Hash:   *hash,
+				Height: height,
+			},
+			Time: header.Timestamp,
+		}
+	}
+
+	// Check watched addresses for new activity.
+	c.checkWatchedAddresses(height)
+
+	return nil
+}
+
+// checkWatchedAddresses checks if any watched addresses have new activity.
+// For every watched address it re-queries the address history and emits a
+// RelevantTx for each transaction confirmed exactly at the given height.
+// NOTE(review): this issues one address-history call per watched address per
+// block - confirm this is acceptable for large watch lists.
+func (c *ChainClient) checkWatchedAddresses(height int32) {
+	// Snapshot the watch list so the lock is not held during network calls.
+	c.watchedAddrsMtx.RLock()
+	addrs := make([]btcutil.Address, 0, len(c.watchedAddrs))
+	for _, addr := range c.watchedAddrs {
+		addrs = append(addrs, addr)
+	}
+	c.watchedAddrsMtx.RUnlock()
+
+	if len(addrs) == 0 {
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	defer cancel()
+
+	for _, addr := range addrs {
+		txs, err := c.client.GetAddressTxs(ctx, addr.EncodeAddress())
+		if err != nil {
+			continue
+		}
+
+		for _, txInfo := range txs {
+			if !txInfo.Status.Confirmed {
+				continue
+			}
+			if txInfo.Status.BlockHeight != int64(height) {
+				continue
+			}
+
+			// New transaction at this height.
+			tx, err := c.client.GetRawTransactionMsgTx(ctx, txInfo.TxID)
+			if err != nil {
+				continue
+			}
+
+			blockHash, err := chainhash.NewHashFromStr(txInfo.Status.BlockHash)
+			if err != nil {
+				continue
+			}
+
+			c.notificationChan <- chain.RelevantTx{
+				TxRecord: &wtxmgr.TxRecord{
+					MsgTx:    *tx,
+					Hash:     tx.TxHash(),
+					Received: time.Unix(txInfo.Status.BlockTime, 0),
+				},
+				Block: &wtxmgr.BlockMeta{
+					Block: wtxmgr.Block{
+						Hash:   *blockHash,
+						Height: int32(txInfo.Status.BlockHeight),
+					},
+					Time: time.Unix(txInfo.Status.BlockTime, 0),
+				},
+			}
+		}
+	}
+}
+
+// cacheHeader caches a block header. The header cache and the height-to-hash
+// map are guarded by separate mutexes (headerCacheMtx / heightToHashMtx).
+func (c *ChainClient) cacheHeader(height int32, hash *chainhash.Hash,
+	header *wire.BlockHeader) {
+
+	c.headerCacheMtx.Lock()
+	c.headerCache[*hash] = header
+	c.headerCacheMtx.Unlock()
+
+	c.heightToHashMtx.Lock()
+	c.heightToHash[height] = hash
+	c.heightToHashMtx.Unlock()
+}
+
+// scriptFromAddress creates a pkScript from an address.
+// scriptFromAddress builds the pay-to-address output script for addr.
+// The chain params are unused: PayToAddrScript derives the script purely
+// from the concrete address type, so the parameter is blanked to make
+// that explicit while keeping the call sites unchanged.
+func scriptFromAddress(addr btcutil.Address, _ *chaincfg.Params) ([]byte, error) {
+	return txscript.PayToAddrScript(addr)
+}
diff --git a/esplora/chainclient_test.go b/esplora/chainclient_test.go
new file mode 100644
index 00000000000..6ea6ad41198
--- /dev/null
+++ b/esplora/chainclient_test.go
@@ -0,0 +1,481 @@
+package esplora
+
+import (
+	"testing"
+	"time"
+
+	"github.com/btcsuite/btcd/btcutil"
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/chaincfg/chainhash"
+	"github.com/btcsuite/btcd/rpcclient"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcwallet/chain"
+	"github.com/stretchr/testify/require"
+)
+
+// TestChainClientInterface verifies that ChainClient implements chain.Interface.
+func TestChainClientInterface(t *testing.T) {
+	t.Parallel()
+
+	// Compile-time assertion; fails the build rather than the test.
+	var _ chain.Interface = (*ChainClient)(nil)
+}
+
+// TestNewChainClient tests creating a new chain client.
+func TestNewChainClient(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	require.NotNil(t, chainClient)
+	require.NotNil(t, chainClient.client)
+	require.NotNil(t, chainClient.headerCache)
+	require.NotNil(t, chainClient.heightToHash)
+	require.NotNil(t, chainClient.notificationChan)
+	require.Equal(t, &chaincfg.MainNetParams, chainClient.chainParams)
+}
+
+// TestChainClientBackEnd tests the BackEnd method.
+func TestChainClientBackEnd(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	require.Equal(t, "esplora", chainClient.BackEnd())
+}
+
+// TestChainClientNotifications tests the Notifications channel.
+func TestChainClientNotifications(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 3, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + notifChan := chainClient.Notifications() + require.NotNil(t, notifChan) +} + +// TestChainClientTestMempoolAccept tests that TestMempoolAccept returns nil. +func TestChainClientTestMempoolAccept(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 3, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + tx := wire.NewMsgTx(wire.TxVersion) + results, err := chainClient.TestMempoolAccept([]*wire.MsgTx{tx}, 0.0) + + // Esplora doesn't support this, so we expect ErrBackendVersion error + // which triggers the caller to fall back to direct publish. + require.ErrorIs(t, err, rpcclient.ErrBackendVersion) + require.Nil(t, results) +} + +// TestChainClientMapRPCErr tests the MapRPCErr method. +func TestChainClientMapRPCErr(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 3, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + testErr := ErrNotConnected + mappedErr := chainClient.MapRPCErr(testErr) + + require.Equal(t, testErr, mappedErr) +} + +// TestChainClientNotifyBlocks tests enabling block notifications. 
+func TestChainClientNotifyBlocks(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 3, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + err := chainClient.NotifyBlocks() + require.NoError(t, err) + require.True(t, chainClient.notifyBlocks.Load()) +} + +// TestChainClientNotifyReceived tests adding watched addresses. +func TestChainClientNotifyReceived(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 3, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + // Create a test address. + pubKeyHash := make([]byte, 20) + addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, &chaincfg.MainNetParams) + require.NoError(t, err) + + err = chainClient.NotifyReceived([]btcutil.Address{addr}) + require.NoError(t, err) + + chainClient.watchedAddrsMtx.RLock() + _, exists := chainClient.watchedAddrs[addr.EncodeAddress()] + chainClient.watchedAddrsMtx.RUnlock() + + require.True(t, exists) +} + +// TestChainClientIsCurrent tests the IsCurrent method. +func TestChainClientIsCurrent(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 0, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + // Without a live connection, IsCurrent() should return false since it + // cannot fetch the best block from the network. + require.False(t, chainClient.IsCurrent()) +} + +// TestChainClientCacheHeader tests the header caching functionality. 
+func TestChainClientCacheHeader(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 30 * time.Second, + MaxRetries: 3, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + // Create a test header. + header := &wire.BlockHeader{ + Version: 1, + Timestamp: time.Now(), + Bits: 0x1d00ffff, + } + hash := header.BlockHash() + height := int32(100) + + // Cache the header. + chainClient.cacheHeader(height, &hash, header) + + // Verify it's in the header cache. + chainClient.headerCacheMtx.RLock() + cachedHeader, exists := chainClient.headerCache[hash] + chainClient.headerCacheMtx.RUnlock() + + require.True(t, exists) + require.Equal(t, header, cachedHeader) + + // Verify height to hash mapping. + chainClient.heightToHashMtx.RLock() + cachedHash, exists := chainClient.heightToHash[height] + chainClient.heightToHashMtx.RUnlock() + + require.True(t, exists) + require.Equal(t, &hash, cachedHash) +} + +// TestChainClientGetUtxo tests the GetUtxo method. +func TestChainClientGetUtxo(t *testing.T) { + t.Parallel() + + cfg := &ClientConfig{ + URL: "http://localhost:3002", + RequestTimeout: 1 * time.Second, + MaxRetries: 0, + PollInterval: 10 * time.Second, + } + client := NewClient(cfg) + + chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil) + + // Create a test outpoint and pkScript. + testHash := chainhash.Hash{0x01, 0x02, 0x03} + op := &wire.OutPoint{ + Hash: testHash, + Index: 0, + } + pkScript := []byte{0x00, 0x14, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14} + + // Without a connected client, GetUtxo should return an error. + cancel := make(chan struct{}) + _, err := chainClient.GetUtxo(op, pkScript, 100, cancel) + require.Error(t, err) +} + +// TestEsploraUtxoSourceInterface verifies that ChainClient can be used as a +// UTXO source. 
+func TestEsploraUtxoSourceInterface(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	// Define the interface locally to test without importing btcwallet.
+	// This is a compile-time-only check; if ChainClient stops satisfying
+	// the method set, this test fails to build.
+	type UtxoSource interface {
+		GetUtxo(op *wire.OutPoint, pkScript []byte, heightHint uint32,
+			cancel <-chan struct{}) (*wire.TxOut, error)
+	}
+
+	// Verify ChainClient implements UtxoSource.
+	var _ UtxoSource = chainClient
+}
+
+// TestChainClientGetBlockHashCaching tests that GetBlockHash caches results.
+func TestChainClientGetBlockHashCaching(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	// Pre-populate the cache. A cache hit must be served without any
+	// network round trip, which is why the test passes with no server.
+	testHash := chainhash.Hash{0x01, 0x02, 0x03, 0x04}
+	height := int32(500)
+
+	chainClient.heightToHashMtx.Lock()
+	chainClient.heightToHash[height] = &testHash
+	chainClient.heightToHashMtx.Unlock()
+
+	// GetBlockHash should return the cached value.
+	hash, err := chainClient.GetBlockHash(int64(height))
+	require.NoError(t, err)
+	require.Equal(t, &testHash, hash)
+}
+
+// TestChainClientGetBlockHeaderCaching tests that GetBlockHeader caches results.
+func TestChainClientGetBlockHeaderCaching(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	// Create and cache a test header.
+	header := &wire.BlockHeader{
+		Version:   1,
+		Timestamp: time.Now(),
+		Bits:      0x1d00ffff,
+	}
+	hash := header.BlockHash()
+
+	chainClient.headerCacheMtx.Lock()
+	chainClient.headerCache[hash] = header
+	chainClient.headerCacheMtx.Unlock()
+
+	// GetBlockHeader should return the cached value.
+	cachedHeader, err := chainClient.GetBlockHeader(&hash)
+	require.NoError(t, err)
+	require.Equal(t, header, cachedHeader)
+}
+
+// TestChainClientMultipleAddresses tests watching multiple addresses.
+func TestChainClientMultipleAddresses(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	// Create multiple test addresses, each with a distinct pubkey hash so
+	// they register as distinct watched entries.
+	addrs := make([]btcutil.Address, 5)
+	for i := 0; i < 5; i++ {
+		pubKeyHash := make([]byte, 20)
+		pubKeyHash[0] = byte(i)
+		addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, &chaincfg.MainNetParams)
+		require.NoError(t, err)
+		addrs[i] = addr
+	}
+
+	err := chainClient.NotifyReceived(addrs)
+	require.NoError(t, err)
+
+	chainClient.watchedAddrsMtx.RLock()
+	count := len(chainClient.watchedAddrs)
+	chainClient.watchedAddrsMtx.RUnlock()
+
+	require.Equal(t, 5, count)
+}
+
+// TestChainClientDefaultConfig tests that default config is used when nil is passed.
+func TestChainClientDefaultConfig(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	// Passing a nil ChainClientConfig should cause the defaults from
+	// DefaultChainClientConfig to be applied.
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, nil)
+
+	require.NotNil(t, chainClient.cfg)
+	require.True(t, chainClient.cfg.UseGapLimit)
+	require.Equal(t, 20, chainClient.cfg.GapLimit)
+	require.Equal(t, 10, chainClient.cfg.AddressBatchSize)
+}
+
+// TestChainClientCustomConfig tests that custom config is properly applied.
+func TestChainClientCustomConfig(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	// A fully specified config must be taken verbatim, not merged with
+	// defaults.
+	chainClientCfg := &ChainClientConfig{
+		UseGapLimit:      false,
+		GapLimit:         50,
+		AddressBatchSize: 25,
+	}
+	chainClient := NewChainClient(client, &chaincfg.MainNetParams, chainClientCfg)
+
+	require.NotNil(t, chainClient.cfg)
+	require.False(t, chainClient.cfg.UseGapLimit)
+	require.Equal(t, 50, chainClient.cfg.GapLimit)
+	require.Equal(t, 25, chainClient.cfg.AddressBatchSize)
+}
+
+// TestDefaultChainClientConfig tests the DefaultChainClientConfig function.
+func TestDefaultChainClientConfig(t *testing.T) {
+	t.Parallel()
+
+	cfg := DefaultChainClientConfig()
+
+	require.NotNil(t, cfg)
+	require.True(t, cfg.UseGapLimit)
+	require.Equal(t, 20, cfg.GapLimit)
+	require.Equal(t, 10, cfg.AddressBatchSize)
+}
+
+// TestSortUint32Slice tests the sorting helper function.
+func TestSortUint32Slice(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name     string
+		input    []uint32
+		expected []uint32
+	}{
+		{
+			name:     "already sorted",
+			input:    []uint32{1, 2, 3, 4, 5},
+			expected: []uint32{1, 2, 3, 4, 5},
+		},
+		{
+			name:     "reverse order",
+			input:    []uint32{5, 4, 3, 2, 1},
+			expected: []uint32{1, 2, 3, 4, 5},
+		},
+		{
+			name:     "random order",
+			input:    []uint32{3, 1, 4, 1, 5, 9, 2, 6},
+			expected: []uint32{1, 1, 2, 3, 4, 5, 6, 9},
+		},
+		{
+			name:     "single element",
+			input:    []uint32{42},
+			expected: []uint32{42},
+		},
+		{
+			name:     "empty slice",
+			input:    []uint32{},
+			expected: []uint32{},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// sortUint32Slice sorts in place, so assert on the
+			// mutated input.
+			sortUint32Slice(tc.input)
+			require.Equal(t, tc.expected, tc.input)
+		})
+	}
+}
diff --git a/esplora/client.go b/esplora/client.go
new file mode 100644
index 00000000000..d88bf5a2a5f
--- /dev/null
+++ b/esplora/client.go
@@ -0,0 +1,900 @@
+package esplora
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/btcsuite/btcd/btcutil"
+	"github.com/btcsuite/btcd/chaincfg/chainhash"
+	"github.com/btcsuite/btcd/wire"
+)
+
+var (
+	// ErrClientShutdown is returned when the client has been shut down.
+	ErrClientShutdown = errors.New("esplora client has been shut down")
+
+	// ErrNotConnected is returned when the API is not reachable.
+	// NOTE(review): ErrNotConnected, ErrBlockNotFound and ErrTxNotFound
+	// are declared but not returned anywhere in this file — confirm they
+	// are used by callers or wire them into the relevant error paths.
+	ErrNotConnected = errors.New("esplora API not reachable")
+
+	// ErrBlockNotFound is returned when a block cannot be found.
+	ErrBlockNotFound = errors.New("block not found")
+
+	// ErrTxNotFound is returned when a transaction cannot be found.
+	ErrTxNotFound = errors.New("transaction not found")
+)
+
+// ClientConfig holds the configuration for the Esplora client.
+type ClientConfig struct {
+	// URL is the base URL of the Esplora API (e.g., http://localhost:3002).
+	URL string
+
+	// RequestTimeout is the timeout for individual HTTP requests.
+	RequestTimeout time.Duration
+
+	// MaxRetries is the maximum number of retries for failed requests.
+	MaxRetries int
+
+	// PollInterval is the interval for polling new blocks.
+	PollInterval time.Duration
+}
+
+// BlockStatus represents the status of a block.
+type BlockStatus struct {
+	InBestChain bool   `json:"in_best_chain"`
+	Height      int64  `json:"height"`
+	NextBest    string `json:"next_best,omitempty"`
+}
+
+// BlockInfo represents block information from the API.
+type BlockInfo struct {
+	ID                string  `json:"id"`
+	Height            int64   `json:"height"`
+	Version           int32   `json:"version"`
+	Timestamp         int64   `json:"timestamp"`
+	TxCount           int     `json:"tx_count"`
+	Size              int     `json:"size"`
+	Weight            int     `json:"weight"`
+	MerkleRoot        string  `json:"merkle_root"`
+	PreviousBlockHash string  `json:"previousblockhash"`
+	MedianTime        int64   `json:"mediantime"`
+	Nonce             uint32  `json:"nonce"`
+	Bits              uint32  `json:"bits"`
+	Difficulty        float64 `json:"difficulty"`
+}
+
+// TxStatus represents transaction confirmation status.
+type TxStatus struct {
+	Confirmed   bool   `json:"confirmed"`
+	BlockHeight int64  `json:"block_height,omitempty"`
+	BlockHash   string `json:"block_hash,omitempty"`
+	BlockTime   int64  `json:"block_time,omitempty"`
+}
+
+// TxInfo represents transaction information from the API.
+type TxInfo struct {
+	TxID     string   `json:"txid"`
+	Version  int32    `json:"version"`
+	LockTime uint32   `json:"locktime"`
+	Size     int      `json:"size"`
+	Weight   int      `json:"weight"`
+	Fee      int64    `json:"fee"`
+	Vin      []TxVin  `json:"vin"`
+	Vout     []TxVout `json:"vout"`
+	Status   TxStatus `json:"status"`
+}
+
+// TxVin represents a transaction input.
+type TxVin struct {
+	TxID         string   `json:"txid"`
+	Vout         uint32   `json:"vout"`
+	PrevOut      *TxVout  `json:"prevout,omitempty"`
+	ScriptSig    string   `json:"scriptsig"`
+	ScriptSigAsm string   `json:"scriptsig_asm"`
+	Witness      []string `json:"witness,omitempty"`
+	Sequence     uint32   `json:"sequence"`
+	IsCoinbase   bool     `json:"is_coinbase"`
+}
+
+// TxVout represents a transaction output.
+type TxVout struct {
+	ScriptPubKey     string `json:"scriptpubkey"`
+	ScriptPubKeyAsm  string `json:"scriptpubkey_asm"`
+	ScriptPubKeyType string `json:"scriptpubkey_type"`
+	ScriptPubKeyAddr string `json:"scriptpubkey_address,omitempty"`
+	Value            int64  `json:"value"`
+}
+
+// UTXO represents an unspent transaction output.
+type UTXO struct {
+	TxID   string   `json:"txid"`
+	Vout   uint32   `json:"vout"`
+	Status TxStatus `json:"status"`
+	Value  int64    `json:"value"`
+}
+
+// OutSpend represents the spend status of an output.
+type OutSpend struct {
+	Spent  bool     `json:"spent"`
+	TxID   string   `json:"txid,omitempty"`
+	Vin    uint32   `json:"vin,omitempty"`
+	Status TxStatus `json:"status,omitempty"`
+}
+
+// MerkleProof represents a merkle proof for a transaction.
+type MerkleProof struct {
+	BlockHeight int64    `json:"block_height"`
+	Merkle      []string `json:"merkle"`
+	Pos         int      `json:"pos"`
+}
+
+// FeeEstimates represents fee estimates from the API.
+// Keys are confirmation targets (as strings), values are fee rates in sat/vB.
+type FeeEstimates map[string]float64
+
+// Client is an HTTP client for the Esplora REST API.
+type Client struct {
+	cfg *ClientConfig
+
+	httpClient *http.Client
+
+	// started indicates whether the client has been started.
+	started atomic.Bool
+
+	// bestBlockMtx protects bestBlock fields.
+	bestBlockMtx    sync.RWMutex
+	bestBlockHash   string
+	bestBlockHeight int64
+
+	// subscribersMtx protects the subscribers map.
+	subscribersMtx sync.RWMutex
+	// subscribers maps subscriber IDs to their notification channels.
+	// Each subscriber gets its own copy of block notifications.
+	subscribers map[uint64]chan *BlockInfo
+	nextSubID   uint64
+
+	wg   sync.WaitGroup
+	quit chan struct{}
+}
+
+// NewClient creates a new Esplora client with the given configuration.
+//
+// NOTE(review): cfg must be non-nil; a nil cfg panics here when reading
+// cfg.RequestTimeout. Confirm all call sites construct a config first.
+func NewClient(cfg *ClientConfig) *Client {
+	return &Client{
+		cfg: cfg,
+		httpClient: &http.Client{
+			Timeout: cfg.RequestTimeout,
+		},
+		subscribers: make(map[uint64]chan *BlockInfo),
+		quit:        make(chan struct{}),
+	}
+}
+
+// Start initializes the client and begins polling for new blocks. It is
+// idempotent: subsequent calls after the first return nil immediately.
+// Note that on a failed first connection attempt the client still counts
+// as started, so Stop must be called before retrying.
+func (c *Client) Start() error {
+	if c.started.Swap(true) {
+		return nil
+	}
+
+	log.Infof("Starting Esplora client, url=%s", c.cfg.URL)
+
+	// Verify connection by fetching tip.
+	ctx, cancel := context.WithTimeout(context.Background(), c.cfg.RequestTimeout)
+	defer cancel()
+
+	height, err := c.GetTipHeight(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to connect to Esplora API: %w", err)
+	}
+
+	hash, err := c.GetTipHash(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get tip hash: %w", err)
+	}
+
+	c.bestBlockMtx.Lock()
+	c.bestBlockHeight = height
+	c.bestBlockHash = hash
+	c.bestBlockMtx.Unlock()
+
+	log.Infof("Connected to Esplora API: tip height=%d, hash=%s", height, hash)
+
+	// Start block polling goroutine.
+	c.wg.Add(1)
+	go c.blockPoller()
+
+	return nil
+}
+
+// Stop shuts down the client: it stops the block poller, waits for it to
+// exit, and closes every subscriber channel so consumers observe EOF.
+func (c *Client) Stop() error {
+	if !c.started.Load() {
+		return nil
+	}
+
+	log.Info("Stopping Esplora client")
+
+	close(c.quit)
+	c.wg.Wait()
+
+	// Close all subscriber channels. Safe against Unsubscribe because the
+	// poller has already exited, so no sends can race with these closes.
+	c.subscribersMtx.Lock()
+	for id, ch := range c.subscribers {
+		close(ch)
+		delete(c.subscribers, id)
+	}
+	c.subscribersMtx.Unlock()
+
+	return nil
+}
+
+// IsConnected returns true if the client appears to be working.
+// IsConnected performs a live probe (a tip-height fetch with a 5s budget),
+// so it reports actual API reachability rather than cached state. It
+// returns false if the client was never started.
+func (c *Client) IsConnected() bool {
+	if !c.started.Load() {
+		return false
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	_, err := c.GetTipHeight(ctx)
+	return err == nil
+}
+
+// Subscribe registers a new subscriber for block notifications and returns
+// the subscription channel and ID. Each subscriber gets its own copy of
+// block notifications to prevent race conditions between consumers.
+func (c *Client) Subscribe() (<-chan *BlockInfo, uint64) {
+	c.subscribersMtx.Lock()
+	defer c.subscribersMtx.Unlock()
+
+	id := c.nextSubID
+	c.nextSubID++
+
+	// Create a buffered channel for this subscriber. The buffer absorbs
+	// short bursts of blocks; see notifySubscribers for the overflow
+	// policy when a consumer falls behind.
+	ch := make(chan *BlockInfo, 10)
+	c.subscribers[id] = ch
+
+	log.Debugf("New block notification subscriber: id=%d, total=%d",
+		id, len(c.subscribers))
+
+	return ch, id
+}
+
+// Unsubscribe removes a subscriber from block notifications. It is a no-op
+// for unknown (or already removed) IDs.
+func (c *Client) Unsubscribe(id uint64) {
+	c.subscribersMtx.Lock()
+	defer c.subscribersMtx.Unlock()
+
+	if ch, ok := c.subscribers[id]; ok {
+		close(ch)
+		delete(c.subscribers, id)
+		log.Debugf("Removed block notification subscriber: id=%d, remaining=%d",
+			id, len(c.subscribers))
+	}
+}
+
+// notifySubscribers sends a block notification to all subscribers. Sends
+// are non-blocking: a subscriber whose buffer is full simply misses the
+// block (logged at warn level) rather than stalling the poller.
+func (c *Client) notifySubscribers(blockInfo *BlockInfo) {
+	c.subscribersMtx.RLock()
+	defer c.subscribersMtx.RUnlock()
+
+	for id, ch := range c.subscribers {
+		select {
+		case ch <- blockInfo:
+			// Successfully sent
+		default:
+			// Channel full, log warning but don't block
+			log.Warnf("Block notification channel full for subscriber %d, "+
+				"skipping height %d", id, blockInfo.Height)
+		}
+	}
+}
+
+// blockPoller polls for new blocks at regular intervals.
+func (c *Client) blockPoller() {
+	defer c.wg.Done()
+
+	ticker := time.NewTicker(c.cfg.PollInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-c.quit:
+			return
+		case <-ticker.C:
+			c.checkForNewBlocks()
+		}
+	}
+}
+
+// checkForNewBlocks compares the remote tip against our cached best block.
+// For each newly appeared block it fetches the block info, updates the
+// cached tip and notifies all subscribers. If the tip hash changed at the
+// same height (a one-block reorg), the replacement block is adopted and
+// subscribers are notified, so the cached tip never stays stale.
+func (c *Client) checkForNewBlocks() {
+	ctx, cancel := context.WithTimeout(context.Background(), c.cfg.RequestTimeout)
+	defer cancel()
+
+	newHeight, err := c.GetTipHeight(ctx)
+	if err != nil {
+		log.Debugf("Failed to get tip height: %v", err)
+		return
+	}
+
+	c.bestBlockMtx.RLock()
+	currentHeight := c.bestBlockHeight
+	currentHash := c.bestBlockHash
+	c.bestBlockMtx.RUnlock()
+
+	if newHeight <= currentHeight {
+		// Check for reorg by comparing hashes.
+		newHash, err := c.GetTipHash(ctx)
+		if err != nil {
+			return
+		}
+		if newHash != currentHash && newHeight == currentHeight {
+			// Reorg at the same height. Previously we only logged
+			// here and kept the orphaned hash cached forever;
+			// instead adopt the replacement block and tell
+			// subscribers about it.
+			log.Warnf("Possible reorg detected at height %d: old=%s new=%s",
+				currentHeight, currentHash, newHash)
+
+			blockInfo, err := c.GetBlockInfo(ctx, newHash)
+			if err != nil {
+				log.Warnf("Failed to get reorged block info for %s: %v",
+					newHash, err)
+				return
+			}
+
+			c.bestBlockMtx.Lock()
+			c.bestBlockHash = newHash
+			c.bestBlockMtx.Unlock()
+
+			c.notifySubscribers(blockInfo)
+		}
+		return
+	}
+
+	// New blocks detected, fetch and notify for each.
+	for height := currentHeight + 1; height <= newHeight; height++ {
+		blockHash, err := c.GetBlockHashByHeight(ctx, height)
+		if err != nil {
+			log.Warnf("Failed to get block hash at height %d: %v", height, err)
+			continue
+		}
+
+		blockInfo, err := c.GetBlockInfo(ctx, blockHash)
+		if err != nil {
+			log.Warnf("Failed to get block info for %s: %v", blockHash, err)
+			continue
+		}
+
+		// Update best block.
+		c.bestBlockMtx.Lock()
+		c.bestBlockHeight = height
+		c.bestBlockHash = blockHash
+		c.bestBlockMtx.Unlock()
+
+		// Send notification to all subscribers.
+		log.Debugf("New block notification: height=%d hash=%s", height, blockHash)
+		c.notifySubscribers(blockInfo)
+	}
+}
+
+// doRequest performs an HTTP request with retries.
+func (c *Client) doRequest(ctx context.Context, method, path string, body io.Reader) (*http.Response, error) { + url := c.cfg.URL + path + + var lastErr error + for i := 0; i <= c.cfg.MaxRetries; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.quit: + return nil, ErrClientShutdown + default: + } + + req, err := http.NewRequestWithContext(ctx, method, url, body) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + if body != nil { + req.Header.Set("Content-Type", "text/plain") + } + + resp, err := c.httpClient.Do(req) + if err != nil { + lastErr = err + if i < c.cfg.MaxRetries { + time.Sleep(time.Duration(i+1) * 100 * time.Millisecond) + } + continue + } + + return resp, nil + } + + return nil, fmt.Errorf("request failed after %d attempts: %w", c.cfg.MaxRetries+1, lastErr) +} + +// doGet performs a GET request and returns the response body. +func (c *Client) doGet(ctx context.Context, path string) ([]byte, error) { + resp, err := c.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("API returned status %d: %s", resp.StatusCode, string(body)) + } + + return body, nil +} + +// GetTipHeight returns the current blockchain tip height. +func (c *Client) GetTipHeight(ctx context.Context) (int64, error) { + body, err := c.doGet(ctx, "/blocks/tip/height") + if err != nil { + return 0, err + } + + height, err := strconv.ParseInt(string(body), 10, 64) + if err != nil { + return 0, fmt.Errorf("failed to parse height: %w", err) + } + + return height, nil +} + +// GetTipHash returns the current blockchain tip hash. 
+// GetTipHash returns the current blockchain tip hash. The Esplora API
+// returns the hash as plain text, which is passed through verbatim.
+func (c *Client) GetTipHash(ctx context.Context) (string, error) {
+	body, err := c.doGet(ctx, "/blocks/tip/hash")
+	if err != nil {
+		return "", err
+	}
+
+	return string(body), nil
+}
+
+// GetBlockInfo fetches block information by hash.
+func (c *Client) GetBlockInfo(ctx context.Context, blockHash string) (*BlockInfo, error) {
+	body, err := c.doGet(ctx, "/block/"+blockHash)
+	if err != nil {
+		return nil, err
+	}
+
+	var info BlockInfo
+	if err := json.Unmarshal(body, &info); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &info, nil
+}
+
+// GetBlockStatus fetches block status by hash, which reports whether the
+// block is in the best chain and, if so, its height and successor.
+func (c *Client) GetBlockStatus(ctx context.Context, blockHash string) (*BlockStatus, error) {
+	body, err := c.doGet(ctx, "/block/"+blockHash+"/status")
+	if err != nil {
+		return nil, err
+	}
+
+	var status BlockStatus
+	if err := json.Unmarshal(body, &status); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &status, nil
+}
+
+// GetBlockHeader fetches the raw block header by hash. The API returns the
+// 80-byte header hex-encoded; it is decoded and deserialized into a
+// wire.BlockHeader.
+func (c *Client) GetBlockHeader(ctx context.Context, blockHash string) (*wire.BlockHeader, error) {
+	body, err := c.doGet(ctx, "/block/"+blockHash+"/header")
+	if err != nil {
+		return nil, err
+	}
+
+	headerBytes, err := hex.DecodeString(string(body))
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode header hex: %w", err)
+	}
+
+	header := &wire.BlockHeader{}
+	if err := header.Deserialize(bytes.NewReader(headerBytes)); err != nil {
+		return nil, fmt.Errorf("failed to deserialize header: %w", err)
+	}
+
+	return header, nil
+}
+
+// GetBlockHeaderByHeight fetches block header by height. This costs two
+// round trips: one to resolve the height to a hash, one for the header.
+func (c *Client) GetBlockHeaderByHeight(ctx context.Context, height int64) (*wire.BlockHeader, error) {
+	hash, err := c.GetBlockHashByHeight(ctx, height)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.GetBlockHeader(ctx, hash)
+}
+
+// GetBlockHashByHeight fetches the block hash at a given height.
+func (c *Client) GetBlockHashByHeight(ctx context.Context, height int64) (string, error) { + body, err := c.doGet(ctx, fmt.Sprintf("/block-height/%d", height)) + if err != nil { + return "", err + } + + return string(body), nil +} + +// GetBlockTxIDs fetches all transaction IDs in a block. +func (c *Client) GetBlockTxIDs(ctx context.Context, blockHash string) ([]string, error) { + body, err := c.doGet(ctx, "/block/"+blockHash+"/txids") + if err != nil { + return nil, err + } + + var txids []string + if err := json.Unmarshal(body, &txids); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return txids, nil +} + +// GetBlockTxs fetches all transactions in a block with full details including +// addresses. This is more efficient than GetBlockTxIDs + individual tx fetches +// because it returns all data with fewer requests. +// Note: The API is paginated at 25 txs per page, so we fetch all pages. +func (c *Client) GetBlockTxs(ctx context.Context, blockHash string) ([]*TxInfo, error) { + var allTxs []*TxInfo + startIndex := 0 + const pageSize = 25 + + for { + var endpoint string + if startIndex == 0 { + endpoint = "/block/" + blockHash + "/txs" + } else { + endpoint = fmt.Sprintf("/block/%s/txs/%d", blockHash, startIndex) + } + + body, err := c.doGet(ctx, endpoint) + if err != nil { + // If we already have some results, treat errors on subsequent + // pages as end of pagination (API returns 404 for out of range). + if startIndex > 0 && len(allTxs) > 0 { + break + } + return nil, err + } + + var txs []*TxInfo + if err := json.Unmarshal(body, &txs); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + allTxs = append(allTxs, txs...) + + // If we got fewer than pageSize, we've reached the end. + if len(txs) < pageSize { + break + } + + startIndex += pageSize + } + + return allTxs, nil +} + +// GetBlock fetches a full block with all transactions. 
+// GetBlock reconstructs a full btcutil.Block from the Esplora API: block
+// metadata for the header, the txid list, and then one raw-tx fetch per
+// transaction. NOTE(review): this issues O(tx_count) HTTP requests per
+// block, which can be slow for large blocks — consider the raw-block
+// endpoint if the backend exposes one.
+func (c *Client) GetBlock(ctx context.Context, blockHash *chainhash.Hash) (*btcutil.Block, error) {
+	hashStr := blockHash.String()
+
+	// Get block info for header data.
+	blockInfo, err := c.GetBlockInfo(ctx, hashStr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get block info: %w", err)
+	}
+
+	// Get all transaction IDs.
+	txids, err := c.GetBlockTxIDs(ctx, hashStr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get block txids: %w", err)
+	}
+
+	// Fetch each transaction.
+	transactions := make([]*wire.MsgTx, 0, len(txids))
+	for _, txid := range txids {
+		tx, err := c.GetRawTransactionMsgTx(ctx, txid)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get tx %s: %w", txid, err)
+		}
+		transactions = append(transactions, tx)
+	}
+
+	// Build the block header from the JSON metadata rather than the raw
+	// header endpoint, since we already fetched the info above.
+	prevHash, err := chainhash.NewHashFromStr(blockInfo.PreviousBlockHash)
+	if err != nil {
+		return nil, fmt.Errorf("invalid prev block hash: %w", err)
+	}
+
+	merkleRoot, err := chainhash.NewHashFromStr(blockInfo.MerkleRoot)
+	if err != nil {
+		return nil, fmt.Errorf("invalid merkle root: %w", err)
+	}
+
+	header := wire.BlockHeader{
+		Version:    blockInfo.Version,
+		PrevBlock:  *prevHash,
+		MerkleRoot: *merkleRoot,
+		Timestamp:  time.Unix(blockInfo.Timestamp, 0),
+		Bits:       blockInfo.Bits,
+		Nonce:      blockInfo.Nonce,
+	}
+
+	msgBlock := wire.MsgBlock{
+		Header:       header,
+		Transactions: transactions,
+	}
+
+	return btcutil.NewBlock(&msgBlock), nil
+}
+
+// GetTransaction fetches transaction information by txid.
+func (c *Client) GetTransaction(ctx context.Context, txid string) (*TxInfo, error) {
+	body, err := c.doGet(ctx, "/tx/"+txid)
+	if err != nil {
+		return nil, err
+	}
+
+	var info TxInfo
+	if err := json.Unmarshal(body, &info); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &info, nil
+}
+
+// GetRawTransaction fetches the raw transaction hex by txid.
+func (c *Client) GetRawTransaction(ctx context.Context, txid string) (string, error) {
+	body, err := c.doGet(ctx, "/tx/"+txid+"/hex")
+	if err != nil {
+		return "", err
+	}
+
+	return string(body), nil
+}
+
+// GetRawTransactionMsgTx fetches and deserializes a transaction into a
+// wire.MsgTx (hex fetch, hex decode, then wire deserialization).
+func (c *Client) GetRawTransactionMsgTx(ctx context.Context, txid string) (*wire.MsgTx, error) {
+	txHex, err := c.GetRawTransaction(ctx, txid)
+	if err != nil {
+		return nil, err
+	}
+
+	txBytes, err := hex.DecodeString(txHex)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode tx hex: %w", err)
+	}
+
+	tx := wire.NewMsgTx(wire.TxVersion)
+	if err := tx.Deserialize(bytes.NewReader(txBytes)); err != nil {
+		return nil, fmt.Errorf("failed to deserialize tx: %w", err)
+	}
+
+	return tx, nil
+}
+
+// GetTransactionMsgTx fetches a transaction by hash and returns it as wire.MsgTx.
+// It is a thin convenience wrapper around GetRawTransactionMsgTx.
+func (c *Client) GetTransactionMsgTx(ctx context.Context, txHash *chainhash.Hash) (*wire.MsgTx, error) {
+	return c.GetRawTransactionMsgTx(ctx, txHash.String())
+}
+
+// GetTxStatus fetches the confirmation status of a transaction.
+func (c *Client) GetTxStatus(ctx context.Context, txid string) (*TxStatus, error) {
+	body, err := c.doGet(ctx, "/tx/"+txid+"/status")
+	if err != nil {
+		return nil, err
+	}
+
+	var status TxStatus
+	if err := json.Unmarshal(body, &status); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &status, nil
+}
+
+// GetTxMerkleProof fetches the merkle proof for a transaction.
+func (c *Client) GetTxMerkleProof(ctx context.Context, txid string) (*MerkleProof, error) {
+	body, err := c.doGet(ctx, "/tx/"+txid+"/merkle-proof")
+	if err != nil {
+		return nil, err
+	}
+
+	var proof MerkleProof
+	if err := json.Unmarshal(body, &proof); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &proof, nil
+}
+
+// GetTxOutSpend checks if a specific output is spent.
+func (c *Client) GetTxOutSpend(ctx context.Context, txid string, vout uint32) (*OutSpend, error) {
+	body, err := c.doGet(ctx, fmt.Sprintf("/tx/%s/outspend/%d", txid, vout))
+	if err != nil {
+		return nil, err
+	}
+
+	var outSpend OutSpend
+	if err := json.Unmarshal(body, &outSpend); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return &outSpend, nil
+}
+
+// GetTxOutSpends checks the spend status of all outputs in a transaction.
+// The returned slice is index-aligned with the transaction's outputs.
+func (c *Client) GetTxOutSpends(ctx context.Context, txid string) ([]OutSpend, error) {
+	body, err := c.doGet(ctx, "/tx/"+txid+"/outspends")
+	if err != nil {
+		return nil, err
+	}
+
+	var outSpends []OutSpend
+	if err := json.Unmarshal(body, &outSpends); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return outSpends, nil
+}
+
+// GetAddressTxs fetches transactions for an address.
+// NOTE(review): the Esplora address endpoint returns only the most recent
+// page of transactions — confirm whether callers need the paginated
+// /address/:addr/txs/chain continuation for busy addresses.
+func (c *Client) GetAddressTxs(ctx context.Context, address string) ([]*TxInfo, error) {
+	body, err := c.doGet(ctx, "/address/"+address+"/txs")
+	if err != nil {
+		return nil, err
+	}
+
+	var txs []*TxInfo
+	if err := json.Unmarshal(body, &txs); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return txs, nil
+}
+
+// GetAddressUTXOs fetches unspent outputs for an address.
+func (c *Client) GetAddressUTXOs(ctx context.Context, address string) ([]*UTXO, error) {
+	body, err := c.doGet(ctx, "/address/"+address+"/utxo")
+	if err != nil {
+		return nil, err
+	}
+
+	var utxos []*UTXO
+	if err := json.Unmarshal(body, &utxos); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return utxos, nil
+}
+
+// GetScripthashTxs fetches transactions for a scripthash.
+func (c *Client) GetScripthashTxs(ctx context.Context, scripthash string) ([]*TxInfo, error) {
+	body, err := c.doGet(ctx, "/scripthash/"+scripthash+"/txs")
+	if err != nil {
+		return nil, err
+	}
+
+	var txs []*TxInfo
+	if err := json.Unmarshal(body, &txs); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return txs, nil
+}
+
+// GetScripthashUTXOs fetches unspent outputs for a scripthash.
+func (c *Client) GetScripthashUTXOs(ctx context.Context, scripthash string) ([]*UTXO, error) {
+	body, err := c.doGet(ctx, "/scripthash/"+scripthash+"/utxo")
+	if err != nil {
+		return nil, err
+	}
+
+	var utxos []*UTXO
+	if err := json.Unmarshal(body, &utxos); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return utxos, nil
+}
+
+// GetFeeEstimates fetches fee estimates for various confirmation targets.
+func (c *Client) GetFeeEstimates(ctx context.Context) (FeeEstimates, error) {
+	body, err := c.doGet(ctx, "/fee-estimates")
+	if err != nil {
+		return nil, err
+	}
+
+	var estimates FeeEstimates
+	if err := json.Unmarshal(body, &estimates); err != nil {
+		return nil, fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	return estimates, nil
+}
+
+// BroadcastTransaction broadcasts a raw transaction to the network by
+// POSTing the hex string to /tx. Returns the txid (as plain text from the
+// API) on success.
+func (c *Client) BroadcastTransaction(ctx context.Context, txHex string) (string, error) {
+	resp, err := c.doRequest(ctx, http.MethodPost, "/tx", bytes.NewBufferString(txHex))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("failed to read response: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("broadcast failed with status %d: %s", resp.StatusCode, string(body))
+	}
+
+	return string(body), nil
+}
+
+// BroadcastTx broadcasts a wire.MsgTx to the network.
+// BroadcastTx serializes the given wire.MsgTx, broadcasts it, and parses
+// the returned txid into a chainhash.Hash.
+func (c *Client) BroadcastTx(ctx context.Context, tx *wire.MsgTx) (*chainhash.Hash, error) {
+	var buf bytes.Buffer
+	if err := tx.Serialize(&buf); err != nil {
+		return nil, fmt.Errorf("failed to serialize tx: %w", err)
+	}
+
+	txHex := hex.EncodeToString(buf.Bytes())
+	txid, err := c.BroadcastTransaction(ctx, txHex)
+	if err != nil {
+		return nil, err
+	}
+
+	return chainhash.NewHashFromStr(txid)
+}
+
+// GetTxIndex finds the index of a transaction within a block by linearly
+// scanning the block's txid list.
+func (c *Client) GetTxIndex(ctx context.Context, blockHash string, txid string) (uint32, error) {
+	txids, err := c.GetBlockTxIDs(ctx, blockHash)
+	if err != nil {
+		return 0, err
+	}
+
+	for i, id := range txids {
+		if id == txid {
+			return uint32(i), nil
+		}
+	}
+
+	return 0, fmt.Errorf("transaction %s not found in block %s", txid, blockHash)
+}
+
+// GetTxIndexByHeight finds the transaction index in a block at the given
+// height, returning both the index and the block hash it resolved.
+func (c *Client) GetTxIndexByHeight(ctx context.Context, height int64, txid string) (uint32, string, error) {
+	blockHash, err := c.GetBlockHashByHeight(ctx, height)
+	if err != nil {
+		return 0, "", err
+	}
+
+	txIndex, err := c.GetTxIndex(ctx, blockHash, txid)
+	if err != nil {
+		return 0, "", err
+	}
+
+	return txIndex, blockHash, nil
+}
+
+// GetBestBlock returns the current best block hash and height. This is the
+// locally cached tip maintained by the block poller; it performs no
+// network call and is empty until Start succeeds.
+func (c *Client) GetBestBlock() (string, int64) {
+	c.bestBlockMtx.RLock()
+	defer c.bestBlockMtx.RUnlock()
+	return c.bestBlockHash, c.bestBlockHeight
+}
diff --git a/esplora/client_test.go b/esplora/client_test.go
new file mode 100644
index 00000000000..8cdf6ae4ada
--- /dev/null
+++ b/esplora/client_test.go
@@ -0,0 +1,395 @@
+package esplora
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestNewClient tests creating a new Esplora client.
+func TestNewClient(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+
+	client := NewClient(cfg)
+
+	// All internal state must be initialized eagerly by the constructor.
+	require.NotNil(t, client)
+	require.NotNil(t, client.cfg)
+	require.NotNil(t, client.httpClient)
+	require.NotNil(t, client.subscribers)
+	require.NotNil(t, client.quit)
+	require.Equal(t, cfg.URL, client.cfg.URL)
+	require.Equal(t, cfg.RequestTimeout, client.cfg.RequestTimeout)
+	require.Equal(t, cfg.MaxRetries, client.cfg.MaxRetries)
+	require.Equal(t, cfg.PollInterval, client.cfg.PollInterval)
+}
+
+// TestClientConfig tests the ClientConfig struct.
+func TestClientConfig(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name string
+		cfg  *ClientConfig
+	}{
+		{
+			name: "minimal config",
+			cfg: &ClientConfig{
+				URL: "http://localhost:3002",
+			},
+		},
+		{
+			name: "full config",
+			cfg: &ClientConfig{
+				URL:            "https://blockstream.info/api",
+				RequestTimeout: 60 * time.Second,
+				MaxRetries:     5,
+				PollInterval:   30 * time.Second,
+			},
+		},
+		{
+			name: "testnet config",
+			cfg: &ClientConfig{
+				URL:            "https://blockstream.info/testnet/api",
+				RequestTimeout: 30 * time.Second,
+				MaxRetries:     3,
+				PollInterval:   10 * time.Second,
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			client := NewClient(tc.cfg)
+			require.NotNil(t, client)
+			require.Equal(t, tc.cfg.URL, client.cfg.URL)
+		})
+	}
+}
+
+// TestClientIsConnectedNotStarted tests that IsConnected returns false when
+// the client is not started.
+func TestClientIsConnectedNotStarted(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 1 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+
+	client := NewClient(cfg)
+
+	// Client should not be connected since we haven't started it.
+	// IsConnected short-circuits on the started flag, so no network
+	// request is made here.
+	require.False(t, client.IsConnected())
+}
+
+// TestClientSubscribe tests that Subscribe returns a channel and ID.
+func TestClientSubscribe(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+
+	client := NewClient(cfg)
+
+	// Subscribe should return a channel and ID. IDs are assigned
+	// sequentially starting from zero.
+	notifChan, id := client.Subscribe()
+	require.NotNil(t, notifChan)
+	require.Equal(t, uint64(0), id)
+
+	// Second subscriber should get ID 1.
+	notifChan2, id2 := client.Subscribe()
+	require.NotNil(t, notifChan2)
+	require.Equal(t, uint64(1), id2)
+
+	// Unsubscribe should work.
+	client.Unsubscribe(id)
+	client.Unsubscribe(id2)
+}
+
+// TestClientGetBestBlock tests the GetBestBlock method.
+func TestClientGetBestBlock(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+
+	client := NewClient(cfg)
+
+	// Before starting, best block should be empty (zero values).
+	hash, height := client.GetBestBlock()
+	require.Empty(t, hash)
+	require.Equal(t, int64(0), height)
+}
+
+// TestClientStartStopNotConnected tests starting and stopping the client
+// when no server is available.
+func TestClientStartStopNotConnected(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:19999", // Non-existent port
+		RequestTimeout: 1 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+
+	client := NewClient(cfg)
+
+	// Start should fail when server is not available.
+	err := client.Start()
+	require.Error(t, err)
+
+	// Stop should still work without error, even though Start failed
+	// (the started flag is set before the connection check).
+	err = client.Stop()
+	require.NoError(t, err)
+}
+
+// TestBlockInfoStruct tests the BlockInfo struct fields.
+func TestBlockInfoStruct(t *testing.T) {
+	t.Parallel()
+
+	blockInfo := &BlockInfo{
+		ID:                "00000000000000000001a2b3c4d5e6f7",
+		Height:            800000,
+		Version:           536870912,
+		Timestamp:         1699999999,
+		TxCount:           3000,
+		Size:              1500000,
+		Weight:            4000000,
+		MerkleRoot:        "abcdef1234567890",
+		PreviousBlockHash: "00000000000000000000fedcba987654",
+		Nonce:             12345678,
+		Bits:              386089497,
+	}
+
+	require.Equal(t, "00000000000000000001a2b3c4d5e6f7", blockInfo.ID)
+	require.Equal(t, int64(800000), blockInfo.Height)
+	require.Equal(t, int32(536870912), blockInfo.Version)
+	require.Equal(t, int64(1699999999), blockInfo.Timestamp)
+	require.Equal(t, 3000, blockInfo.TxCount)
+}
+
+// TestTxInfoStruct tests the TxInfo struct fields.
+func TestTxInfoStruct(t *testing.T) {
+	t.Parallel()
+
+	txInfo := &TxInfo{
+		TxID:     "abcdef1234567890abcdef1234567890",
+		Version:  2,
+		LockTime: 0,
+		Size:     250,
+		Weight:   1000,
+		Fee:      5000,
+		Status: TxStatus{
+			Confirmed:   true,
+			BlockHeight: 800000,
+			BlockHash:   "00000000000000000001a2b3c4d5e6f7",
+			BlockTime:   1699999999,
+		},
+	}
+
+	require.Equal(t, "abcdef1234567890abcdef1234567890", txInfo.TxID)
+	require.Equal(t, int32(2), txInfo.Version)
+	require.True(t, txInfo.Status.Confirmed)
+	require.Equal(t, int64(800000), txInfo.Status.BlockHeight)
+}
+
+// TestUTXOStruct tests the UTXO struct fields.
+func TestUTXOStruct(t *testing.T) {
+	t.Parallel()
+
+	utxo := &UTXO{
+		TxID:  "abcdef1234567890abcdef1234567890",
+		Vout:  0,
+		Value: 100000000, // Values are expressed in satoshis.
+		Status: TxStatus{
+			Confirmed:   true,
+			BlockHeight: 800000,
+		},
+	}
+
+	require.Equal(t, "abcdef1234567890abcdef1234567890", utxo.TxID)
+	require.Equal(t, uint32(0), utxo.Vout)
+	require.Equal(t, int64(100000000), utxo.Value)
+	require.True(t, utxo.Status.Confirmed)
+}
+
+// TestOutSpendStruct tests the OutSpend struct fields.
+func TestOutSpendStruct(t *testing.T) {
+	t.Parallel()
+
+	// Unspent output: only the Spent flag matters, other fields are zero.
+	unspent := &OutSpend{
+		Spent: false,
+	}
+	require.False(t, unspent.Spent)
+
+	// Spent output.
+	spent := &OutSpend{
+		Spent: true,
+		TxID:  "spendertxid1234567890",
+		Vin:   0,
+		Status: TxStatus{
+			Confirmed:   true,
+			BlockHeight: 800001,
+		},
+	}
+	require.True(t, spent.Spent)
+	require.Equal(t, "spendertxid1234567890", spent.TxID)
+	require.Equal(t, uint32(0), spent.Vin)
+}
+
+// TestMerkleProofStruct tests the MerkleProof struct fields.
+func TestMerkleProofStruct(t *testing.T) {
+	t.Parallel()
+
+	proof := &MerkleProof{
+		BlockHeight: 800000,
+		Merkle:      []string{"hash1", "hash2", "hash3"},
+		Pos:         5,
+	}
+
+	require.Equal(t, int64(800000), proof.BlockHeight)
+	require.Len(t, proof.Merkle, 3)
+	require.Equal(t, 5, proof.Pos)
+}
+
+// TestFeeEstimatesStruct tests the FeeEstimates map type, which maps
+// confirmation targets (as strings) to fee rates.
+func TestFeeEstimatesStruct(t *testing.T) {
+	t.Parallel()
+
+	estimates := FeeEstimates{
+		"1":   50.0,
+		"2":   40.0,
+		"3":   30.0,
+		"6":   20.0,
+		"12":  10.0,
+		"25":  5.0,
+		"144": 1.0,
+	}
+
+	require.Equal(t, float64(50.0), estimates["1"])
+	require.Equal(t, float64(20.0), estimates["6"])
+	require.Equal(t, float64(1.0), estimates["144"])
+}
+
+// TestClientConfigDefaults tests that zero values work correctly.
+func TestClientConfigDefaults(t *testing.T) {
+	t.Parallel()
+
+	// Create a config with minimal settings.
+	cfg := &ClientConfig{
+		URL: "http://localhost:3002",
+	}
+
+	client := NewClient(cfg)
+	require.NotNil(t, client)
+
+	// HTTP client should have been created with zero timeout.
+	require.NotNil(t, client.httpClient)
+}
+
+// TestTxVinStruct tests the TxVin struct fields.
+func TestTxVinStruct(t *testing.T) {
+	t.Parallel()
+
+	// Regular input.
+	vin := TxVin{
+		TxID:         "previoustxid",
+		Vout:         0,
+		ScriptSig:    "scriptsighex",
+		ScriptSigAsm: "OP_DUP OP_HASH160...",
+		Witness:      []string{"witness1", "witness2"},
+		Sequence:     0xffffffff,
+		IsCoinbase:   false,
+	}
+
+	require.Equal(t, "previoustxid", vin.TxID)
+	require.Equal(t, uint32(0), vin.Vout)
+	require.False(t, vin.IsCoinbase)
+	require.Len(t, vin.Witness, 2)
+
+	// Coinbase input: has no previous outpoint, only the flag and sequence.
+	coinbase := TxVin{
+		IsCoinbase: true,
+		Sequence:   0xffffffff,
+	}
+
+	require.True(t, coinbase.IsCoinbase)
+}
+
+// TestTxVoutStruct tests the TxVout struct fields.
+func TestTxVoutStruct(t *testing.T) {
+	t.Parallel()
+
+	vout := TxVout{
+		ScriptPubKey:     "76a914...88ac",
+		ScriptPubKeyAsm:  "OP_DUP OP_HASH160...",
+		ScriptPubKeyType: "pubkeyhash",
+		ScriptPubKeyAddr: "1BitcoinAddress...",
+		Value:            100000000,
+	}
+
+	require.Equal(t, "76a914...88ac", vout.ScriptPubKey)
+	require.Equal(t, "pubkeyhash", vout.ScriptPubKeyType)
+	require.Equal(t, "1BitcoinAddress...", vout.ScriptPubKeyAddr)
+	require.Equal(t, int64(100000000), vout.Value)
+}
+
+// TestBlockStatusStruct tests the BlockStatus struct fields.
+func TestBlockStatusStruct(t *testing.T) {
+	t.Parallel()
+
+	// Block in best chain.
+	inBestChain := &BlockStatus{
+		InBestChain: true,
+		Height:      800000,
+	}
+	require.True(t, inBestChain.InBestChain)
+	require.Equal(t, int64(800000), inBestChain.Height)
+
+	// Orphaned block.
+	orphaned := &BlockStatus{
+		InBestChain: false,
+		Height:      800000,
+		NextBest:    "nextblockhash",
+	}
+	require.False(t, orphaned.InBestChain)
+	require.Equal(t, "nextblockhash", orphaned.NextBest)
+}
+
+// TestClientErrors tests that error variables are defined correctly.
+func TestClientErrors(t *testing.T) {
+	t.Parallel()
+
+	require.NotNil(t, ErrClientShutdown)
+	require.NotNil(t, ErrNotConnected)
+	require.NotNil(t, ErrBlockNotFound)
+	require.NotNil(t, ErrTxNotFound)
+
+	require.Contains(t, ErrClientShutdown.Error(), "shut down")
+	require.Contains(t, ErrNotConnected.Error(), "not reachable")
+	require.Contains(t, ErrBlockNotFound.Error(), "block not found")
+	require.Contains(t, ErrTxNotFound.Error(), "transaction")
+}
diff --git a/esplora/fee_estimator.go b/esplora/fee_estimator.go
new file mode 100644
index 00000000000..9fed6dbb35b
--- /dev/null
+++ b/esplora/fee_estimator.go
@@ -0,0 +1,301 @@
+package esplora
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
+)
+
+const (
+	// defaultFeeUpdateInterval is the default interval at which the fee
+	// estimator will update its cached fee rates.
+	defaultFeeUpdateInterval = 5 * time.Minute
+
+	// defaultRelayFeePerKW is the default relay fee rate in sat/kw used
+	// when the API doesn't provide one.
+	defaultRelayFeePerKW = chainfee.SatPerKWeight(253)
+)
+
+// FeeEstimatorConfig holds the configuration for the Esplora fee estimator.
+type FeeEstimatorConfig struct {
+	// FallbackFeePerKW is the fee rate (in sat/kw) to use when the API
+	// fails to return a fee estimate.
+	FallbackFeePerKW chainfee.SatPerKWeight
+
+	// MinFeePerKW is the minimum fee rate (in sat/kw) that should be used.
+	MinFeePerKW chainfee.SatPerKWeight
+
+	// FeeUpdateInterval is the interval at which the fee estimator will
+	// update its cached fee rates.
+	FeeUpdateInterval time.Duration
+}
+
+// DefaultFeeEstimatorConfig returns a FeeEstimatorConfig with sensible
+// defaults.
+func DefaultFeeEstimatorConfig() *FeeEstimatorConfig {
+	// Use 25 sat/vB as the fallback fee rate, which matches the bitcoind
+	// backend's default. 25 sat/vB = 6250 sat/kw (25 * 250).
+	return &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(6250),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: defaultFeeUpdateInterval,
+	}
+}
+
+// FeeEstimator is an implementation of the chainfee.Estimator interface that
+// uses an Esplora HTTP API to estimate transaction fees.
+type FeeEstimator struct {
+	started int32 // To be used atomically.
+	stopped int32 // To be used atomically.
+
+	cfg *FeeEstimatorConfig
+
+	client *Client
+
+	// relayFeePerKW is the minimum relay fee in sat/kw.
+	relayFeePerKW chainfee.SatPerKWeight
+
+	// feeCache stores the cached fee estimates by confirmation target.
+	feeCacheMtx sync.RWMutex
+	feeCache    map[uint32]chainfee.SatPerKWeight
+
+	quit chan struct{} // Closed by Stop to signal shutdown.
+	wg   sync.WaitGroup
+}
+
+// Compile time check to ensure FeeEstimator implements chainfee.Estimator.
+var _ chainfee.Estimator = (*FeeEstimator)(nil)
+
+// NewFeeEstimator creates a new Esplora-based fee estimator. A nil cfg falls
+// back to DefaultFeeEstimatorConfig.
+func NewFeeEstimator(client *Client, cfg *FeeEstimatorConfig) *FeeEstimator {
+	if cfg == nil {
+		cfg = DefaultFeeEstimatorConfig()
+	}
+
+	return &FeeEstimator{
+		cfg:           cfg,
+		client:        client,
+		relayFeePerKW: defaultRelayFeePerKW,
+		feeCache:      make(map[uint32]chainfee.SatPerKWeight),
+		quit:          make(chan struct{}),
+	}
+}
+
+// Start signals the FeeEstimator to start any processes or goroutines it needs
+// to perform its duty.
+//
+// NOTE: This is part of the chainfee.Estimator interface.
+func (e *FeeEstimator) Start() error {
+	if atomic.AddInt32(&e.started, 1) != 1 {
+		return nil // Already started; Start is idempotent.
+	}
+
+	log.Info("Starting Esplora fee estimator")
+
+	// Do an initial fee cache update. A failure here is non-fatal: the
+	// estimator falls back to FallbackFeePerKW until a refresh succeeds.
+	if err := e.updateFeeCache(); err != nil {
+		log.Warnf("Failed to update initial fee cache: %v", err)
+	}
+
+	// Start the background fee update goroutine.
+	e.wg.Add(1)
+	go e.feeUpdateLoop()
+
+	return nil
+}
+
+// Stop stops any spawned goroutines and cleans up the resources used by the
+// fee estimator.
+//
+// NOTE: This is part of the chainfee.Estimator interface.
+func (e *FeeEstimator) Stop() error {
+	if atomic.AddInt32(&e.stopped, 1) != 1 {
+		return nil // Already stopped; Stop is idempotent.
+	}
+
+	log.Info("Stopping Esplora fee estimator")
+
+	close(e.quit)
+	e.wg.Wait()
+
+	return nil
+}
+
+// EstimateFeePerKW takes in a target for the number of blocks until an initial
+// confirmation and returns the estimated fee expressed in sat/kw. This
+// implementation is best-effort and never returns a non-nil error: on any
+// cache/API miss it returns the configured fallback fee instead.
+//
+// NOTE: This is part of the chainfee.Estimator interface.
+func (e *FeeEstimator) EstimateFeePerKW(
+	numBlocks uint32) (chainfee.SatPerKWeight, error) {
+
+	if numBlocks > chainfee.MaxBlockTarget {
+		log.Debugf("conf target %d exceeds the max value, use %d instead.",
+			numBlocks, chainfee.MaxBlockTarget)
+		numBlocks = chainfee.MaxBlockTarget
+	}
+
+	// Try to get from cache first.
+	if feeRate, ok := e.getCachedFee(numBlocks); ok {
+		return e.clampFee(feeRate), nil
+	}
+
+	// No cached data available, try to fetch fresh data.
+	if err := e.updateFeeCache(); err != nil {
+		log.Debugf("Failed to fetch fee estimates: %v", err)
+		return e.clampFee(e.cfg.FallbackFeePerKW), nil
+	}
+
+	// Try cache again after update.
+	if feeRate, ok := e.getCachedFee(numBlocks); ok {
+		return e.clampFee(feeRate), nil
+	}
+
+	// Fresh fetch succeeded but had no usable entry for this target.
+	return e.clampFee(e.cfg.FallbackFeePerKW), nil
+}
+
+// RelayFeePerKW returns the minimum fee rate required for transactions to be
+// relayed.
+//
+// NOTE: This is part of the chainfee.Estimator interface.
+func (e *FeeEstimator) RelayFeePerKW() chainfee.SatPerKWeight {
+	return e.relayFeePerKW
+}
+
+// updateFeeCache fetches fee estimates from the Esplora API and caches them.
+func (e *FeeEstimator) updateFeeCache() error {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	estimates, err := e.client.GetFeeEstimates(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get fee estimates: %w", err)
+	}
+
+	// Build the replacement cache before taking the lock so the write
+	// critical section stays as small as possible.
+	newFeeCache := make(map[uint32]chainfee.SatPerKWeight)
+	for targetStr, feeRate := range estimates {
+		target, err := strconv.ParseUint(targetStr, 10, 32)
+		if err != nil {
+			continue // Skip non-numeric confirmation targets.
+		}
+
+		// Esplora returns fee rates in sat/vB, convert to sat/kw.
+		// 1 vB = 4 weight units, so sat/kw = sat/vB * 1000 / 4 = sat/vB * 250
+		feePerKW := satPerVBToSatPerKW(feeRate)
+
+		// Ensure we don't go below the minimum.
+		if feePerKW < e.cfg.MinFeePerKW {
+			feePerKW = e.cfg.MinFeePerKW
+		}
+
+		newFeeCache[uint32(target)] = feePerKW
+	}
+
+	e.feeCacheMtx.Lock()
+	e.feeCache = newFeeCache
+	e.feeCacheMtx.Unlock()
+
+	// Report the number of entries actually cached, not the raw API map
+	// size: entries with unparseable targets were skipped above.
+	log.Debugf("Updated fee cache with %d entries", len(newFeeCache))
+
+	return nil
+}
+
+// feeUpdateLoop periodically updates the fee cache until quit is closed.
+func (e *FeeEstimator) feeUpdateLoop() {
+	defer e.wg.Done()
+
+	ticker := time.NewTicker(e.cfg.FeeUpdateInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := e.updateFeeCache(); err != nil {
+				log.Debugf("Failed to update fee cache: %v", err)
+			}
+
+		case <-e.quit:
+			return
+		}
+	}
+}
+
+// satPerVBToSatPerKW converts a fee rate from sat/vB to sat/kw.
+// 1 vB = 4 weight units
+// 1 kw = 1000 weight units
+// So: sat/kw = sat/vB * 1000 / 4 = sat/vB * 250
+func satPerVBToSatPerKW(satPerVB float64) chainfee.SatPerKWeight {
+	return chainfee.SatPerKWeight(satPerVB * 250)
+}
+
+// getCachedFee finds the best cached fee for a target. It will return the exact
+// target if present, otherwise the closest lower target. If no lower target
+// exists, it returns the minimum cached target (cheaper than requested).
</doc_update>
+func (e *FeeEstimator) getCachedFee(numBlocks uint32) (
+	chainfee.SatPerKWeight, bool) {
+
+	e.feeCacheMtx.RLock()
+	defer e.feeCacheMtx.RUnlock()
+
+	if len(e.feeCache) == 0 {
+		return 0, false
+	}
+
+	// Fast path: exact confirmation target is cached.
+	if feeRate, ok := e.feeCache[numBlocks]; ok {
+		return feeRate, true
+	}
+
+	// Single pass over the cache tracking both the closest target at or
+	// below the request, and the overall minimum target.
+	closestTarget := uint32(0)
+	var closestFee chainfee.SatPerKWeight
+	minTarget := uint32(math.MaxUint32)
+	var minFee chainfee.SatPerKWeight
+	hasMin := false
+
+	for target, fee := range e.feeCache {
+		if target <= numBlocks && target > closestTarget {
+			closestTarget = target
+			closestFee = fee
+		}
+
+		if target < minTarget {
+			minTarget = target
+			minFee = fee
+			hasMin = true
+		}
+	}
+
+	// Prefer a lower (faster-confirming, more expensive) target.
+	if closestTarget > 0 {
+		log.Warnf("Esplora fee cache missing target=%d, using target=%d instead",
+			numBlocks, closestTarget)
+		return closestFee, true
+	}
+
+	if hasMin {
+		log.Warnf("Esplora fee cache missing target=%d, using target=%d instead",
+			numBlocks, minTarget)
+		return minFee, true
+	}
+
+	// Unreachable when the cache is non-empty (hasMin is always set by the
+	// loop above); kept as a defensive fallback.
+	return 0, false
+}
+
+// clampFee enforces a minimum fee floor using relay and configured floors.
+func (e *FeeEstimator) clampFee(
+	fee chainfee.SatPerKWeight) chainfee.SatPerKWeight {
+
+	// The effective floor is the larger of the relay fee and MinFeePerKW.
+	floor := e.relayFeePerKW
+	if e.cfg.MinFeePerKW > floor {
+		floor = e.cfg.MinFeePerKW
+	}
+
+	if fee < floor {
+		return floor
+	}
+
+	return fee
+}
diff --git a/esplora/fee_estimator_test.go b/esplora/fee_estimator_test.go
new file mode 100644
index 00000000000..bd39f56cead
--- /dev/null
+++ b/esplora/fee_estimator_test.go
@@ -0,0 +1,330 @@
+package esplora
+
+import (
+	"testing"
+	"time"
+
+	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
+	"github.com/stretchr/testify/require"
+)
+
+// TestNewFeeEstimator tests creating a new fee estimator.
+func TestNewFeeEstimator(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     3,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	estimator := NewFeeEstimator(client, nil) // nil config => defaults.
+	require.NotNil(t, estimator)
+	require.NotNil(t, estimator.cfg)
+	require.NotNil(t, estimator.feeCache)
+}
+
+// TestFeeEstimatorDefaultConfig tests that default config values are applied.
+func TestFeeEstimatorDefaultConfig(t *testing.T) {
+	t.Parallel()
+
+	cfg := DefaultFeeEstimatorConfig()
+
+	require.NotNil(t, cfg)
+	require.Greater(t, cfg.FallbackFeePerKW, chainfee.SatPerKWeight(0))
+	require.Greater(t, cfg.MinFeePerKW, chainfee.SatPerKWeight(0))
+	require.Greater(t, cfg.FeeUpdateInterval, time.Duration(0))
+}
+
+// TestSatPerVBToSatPerKW tests the fee rate conversion function. Expected
+// values are expressed as small ranges to avoid float truncation brittleness.
+func TestSatPerVBToSatPerKW(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name     string
+		satPerVB float64
+		minSatKW chainfee.SatPerKWeight
+		maxSatKW chainfee.SatPerKWeight
+	}{
+		{
+			name:     "1 sat/vbyte",
+			satPerVB: 1.0,
+			// 1 sat/vB * 250 = 250 sat/kw
+			minSatKW: 245,
+			maxSatKW: 255,
+		},
+		{
+			name:     "10 sat/vbyte",
+			satPerVB: 10.0,
+			// 10 sat/vB * 250 = 2500 sat/kw
+			minSatKW: 2450,
+			maxSatKW: 2550,
+		},
+		{
+			name:     "100 sat/vbyte",
+			satPerVB: 100.0,
+			// 100 sat/vB * 250 = 25000 sat/kw
+			minSatKW: 24500,
+			maxSatKW: 25500,
+		},
+		{
+			name:     "zero fee",
+			satPerVB: 0,
+			minSatKW: 0,
+			maxSatKW: 0,
+		},
+		{
+			name:     "fractional fee",
+			satPerVB: 1.5,
+			// 1.5 sat/vB * 250 = 375 sat/kw
+			minSatKW: 370,
+			maxSatKW: 380,
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			result := satPerVBToSatPerKW(tc.satPerVB)
+			require.GreaterOrEqual(t, result, tc.minSatKW)
+			require.LessOrEqual(t, result, tc.maxSatKW)
+		})
+	}
+}
+
+// TestFeeEstimatorRelayFeePerKW tests that RelayFeePerKW returns a valid
+// value.
+func TestFeeEstimatorRelayFeePerKW(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	estimator := NewFeeEstimator(client, nil)
+
+	relayFee := estimator.RelayFeePerKW()
+	require.Greater(t, relayFee, chainfee.SatPerKWeight(0))
+}
+
+// TestFeeEstimatorEstimateFeePerKWFallback tests that the estimator returns
+// the fallback fee when the server is not available.
+func TestFeeEstimatorEstimateFeePerKWFallback(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 1 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	feeCfg := &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(12500),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: 5 * time.Minute,
+	}
+
+	estimator := NewFeeEstimator(client, feeCfg)
+
+	// Without starting (and thus without a server), EstimateFeePerKW
+	// should return the fallback fee.
+	feeRate, err := estimator.EstimateFeePerKW(6)
+	require.NoError(t, err)
+	require.Equal(t, feeCfg.FallbackFeePerKW, feeRate)
+}
+
+// TestFeeEstimatorCaching tests that fee estimates are properly cached.
+func TestFeeEstimatorCaching(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	feeCfg := &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(12500),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: 5 * time.Minute,
+	}
+
+	estimator := NewFeeEstimator(client, feeCfg)
+
+	// Seed the cache directly so no network round trip is needed.
+	estimator.feeCacheMtx.Lock()
+	estimator.feeCache[6] = chainfee.SatPerKWeight(5000)
+	estimator.feeCacheMtx.Unlock()
+
+	// Should return the cached value, not the fallback.
+	feeRate, err := estimator.EstimateFeePerKW(6)
+	require.NoError(t, err)
+	require.Equal(t, chainfee.SatPerKWeight(5000), feeRate)
+}
+
+// TestFeeEstimatorInterface verifies that FeeEstimator implements the
+// chainfee.Estimator interface.
+func TestFeeEstimatorInterface(t *testing.T) {
+	t.Parallel()
+
+	// This is a compile-time check that FeeEstimator implements the
+	// chainfee.Estimator interface.
+	var _ chainfee.Estimator = (*FeeEstimator)(nil)
+}
+
+// TestFeeEstimatorStartStop tests starting and stopping the fee estimator.
+func TestFeeEstimatorStartStop(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 1 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	feeCfg := &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(12500),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: 5 * time.Minute,
+	}
+
+	estimator := NewFeeEstimator(client, feeCfg)
+
+	// Start should succeed even without a connected server.
+	err := estimator.Start()
+	require.NoError(t, err)
+
+	// Starting again should be a no-op.
+	err = estimator.Start()
+	require.NoError(t, err)
+
+	// Stop should succeed.
+	err = estimator.Stop()
+	require.NoError(t, err)
+
+	// Stopping again should be a no-op.
+	err = estimator.Stop()
+	require.NoError(t, err)
+}
+
+// TestFeeEstimatorClosestTarget tests that the estimator finds the closest
+// cached target when the exact target is not available.
+func TestFeeEstimatorClosestTarget(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	feeCfg := &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(12500),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: 5 * time.Minute,
+	}
+
+	estimator := NewFeeEstimator(client, feeCfg)
+
+	// Seed the cache with fees at a few distinct targets.
+	estimator.feeCacheMtx.Lock()
+	estimator.feeCache[1] = chainfee.SatPerKWeight(10000)
+	estimator.feeCache[3] = chainfee.SatPerKWeight(5000)
+	estimator.feeCache[6] = chainfee.SatPerKWeight(2500)
+	estimator.feeCache[12] = chainfee.SatPerKWeight(1000)
+	estimator.feeCacheMtx.Unlock()
+
+	// Request target 4, should get closest lower target (3).
+	feeRate, err := estimator.EstimateFeePerKW(4)
+	require.NoError(t, err)
+	require.Equal(t, chainfee.SatPerKWeight(5000), feeRate)
+
+	// Request target 10, should get closest lower target (6).
+	feeRate, err = estimator.EstimateFeePerKW(10)
+	require.NoError(t, err)
+	require.Equal(t, chainfee.SatPerKWeight(2500), feeRate)
+
+	// Request target 2, should get closest lower target (1).
+	feeRate, err = estimator.EstimateFeePerKW(2)
+	require.NoError(t, err)
+	require.Equal(t, chainfee.SatPerKWeight(10000), feeRate)
+}
+
+// TestFeeEstimatorMinTargetFallback tests that when no lower target exists,
+// we fall back to the minimum cached target.
+func TestFeeEstimatorMinTargetFallback(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	feeCfg := &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(12500),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: 5 * time.Minute,
+	}
+
+	estimator := NewFeeEstimator(client, feeCfg)
+
+	// Only targets 3 and 6 are cached, so no target <= 1 exists.
+	estimator.feeCacheMtx.Lock()
+	estimator.feeCache[3] = chainfee.SatPerKWeight(5000)
+	estimator.feeCache[6] = chainfee.SatPerKWeight(2500)
+	estimator.feeCacheMtx.Unlock()
+
+	// Request target 1, should get minimum cached target (3).
+	feeRate, err := estimator.EstimateFeePerKW(1)
+	require.NoError(t, err)
+	require.Equal(t, chainfee.SatPerKWeight(5000), feeRate)
+}
+
+// TestFeeEstimatorClampToRelayFloor tests that fees are clamped to relay fee.
+func TestFeeEstimatorClampToRelayFloor(t *testing.T) {
+	t.Parallel()
+
+	cfg := &ClientConfig{
+		URL:            "http://localhost:3002",
+		RequestTimeout: 30 * time.Second,
+		MaxRetries:     0,
+		PollInterval:   10 * time.Second,
+	}
+	client := NewClient(cfg)
+
+	feeCfg := &FeeEstimatorConfig{
+		FallbackFeePerKW:  chainfee.SatPerKWeight(12500),
+		MinFeePerKW:       chainfee.FeePerKwFloor,
+		FeeUpdateInterval: 5 * time.Minute,
+	}
+
+	estimator := NewFeeEstimator(client, feeCfg)
+	// Raise the relay floor above the cached fee so clamping kicks in.
+	estimator.relayFeePerKW = chainfee.SatPerKWeight(6000)
+
+	estimator.feeCacheMtx.Lock()
+	estimator.feeCache[6] = chainfee.SatPerKWeight(1000)
+	estimator.feeCacheMtx.Unlock()
+
+	feeRate, err := estimator.EstimateFeePerKW(6)
+	require.NoError(t, err)
+	require.Equal(t, chainfee.SatPerKWeight(6000), feeRate)
+}
diff --git a/esplora/log.go b/esplora/log.go
new file mode 100644
index 00000000000..8ea46f103ba
--- /dev/null
+++ b/esplora/log.go
@@ -0,0 +1,23 @@
+package esplora
+
+import "github.com/btcsuite/btclog/v2"
+
+// Subsystem defines the logging code for this subsystem.
+const Subsystem = "ESPL"
+
+// log is a logger that is initialized with no output filters. This means the
+// package will not perform any logging by default until the caller requests
+// it.
+var log btclog.Logger
+
+// The default amount of logging is none.
+func init() {
+	UseLogger(btclog.Disabled)
+}
+
+// UseLogger uses a specified Logger to output package logging info. This
+// should be used in preference to SetLogWriter if the caller is also using
+// btclog.
+func UseLogger(logger btclog.Logger) {
+	log = logger
+}
diff --git a/esplora/scripthash.go b/esplora/scripthash.go
new file mode 100644
index 00000000000..96b8e66e9c2
--- /dev/null
+++ b/esplora/scripthash.go
@@ -0,0 +1,86 @@
+package esplora
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/btcsuite/btcd/btcutil"
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/txscript"
+)
+
+// ScripthashFromScript converts a pkScript (output script) to a scripthash.
+// The scripthash is the SHA256 hash of the script with the bytes reversed
+// (displayed in little-endian order).
+func ScripthashFromScript(pkScript []byte) string {
+	hash := sha256.Sum256(pkScript)
+
+	// Reverse the hash bytes into little-endian display order, which is
+	// the format Esplora's scripthash endpoints expect.
+	reversed := make([]byte, len(hash))
+	for i := 0; i < len(hash); i++ {
+		reversed[i] = hash[len(hash)-1-i]
+	}
+
+	return hex.EncodeToString(reversed)
+}
+
+// ScripthashFromAddress converts a Bitcoin address to a scripthash.
+// This creates the appropriate pkScript for the address type and then computes
+// the scripthash.
+func ScripthashFromAddress(address string,
+	params *chaincfg.Params) (string, error) {
+
+	addr, err := btcutil.DecodeAddress(address, params)
+	if err != nil {
+		return "", fmt.Errorf("failed to decode address: %w", err)
+	}
+
+	pkScript, err := txscript.PayToAddrScript(addr)
+	if err != nil {
+		return "", fmt.Errorf("failed to create pkScript: %w", err)
+	}
+
+	return ScripthashFromScript(pkScript), nil
+}
+
+// ScripthashFromAddressUnchecked converts a Bitcoin address to a scripthash
+// without network validation. This is useful when the network parameters are
+// not available but the address format is known to be valid.
+func ScripthashFromAddressUnchecked(address string) (string, error) {
+	// Try mainnet first, then testnet3, regtest, and finally signet. The
+	// resulting pkScript (and thus scripthash) only depends on the decoded
+	// hash and address type, so any matching network yields the same hash.
+	networks := []*chaincfg.Params{
+		&chaincfg.MainNetParams,
+		&chaincfg.TestNet3Params,
+		&chaincfg.RegressionNetParams,
+		&chaincfg.SigNetParams,
+	}
+
+	for _, params := range networks {
+		scripthash, err := ScripthashFromAddress(address, params)
+		if err == nil {
+			return scripthash, nil
+		}
+	}
+
+	return "", fmt.Errorf("failed to decode address on any network: %s",
+		address)
+}
+
+// ReverseBytes reverses a byte slice in place and returns it.
+func ReverseBytes(b []byte) []byte {
+	for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
+		b[i], b[j] = b[j], b[i]
+	}
+	return b
+}
+
+// ReversedHash returns a copy of the hash with bytes reversed. This is useful
+// for converting between internal byte order and display order.
+func ReversedHash(hash []byte) []byte {
+	// Unlike ReverseBytes, the input slice is left untouched.
+	reversed := make([]byte, len(hash))
+	for i := 0; i < len(hash); i++ {
+		reversed[i] = hash[len(hash)-1-i]
+	}
+	return reversed
+}
diff --git a/esplora/scripthash_test.go b/esplora/scripthash_test.go
new file mode 100644
index 00000000000..84fced2bd1c
--- /dev/null
+++ b/esplora/scripthash_test.go
@@ -0,0 +1,170 @@
+package esplora
+
+import (
+	"encoding/hex"
+	"testing"
+
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/stretchr/testify/require"
+)
+
+// TestScripthashFromScript tests the conversion of a pkScript to a scripthash.
+func TestScripthashFromScript(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name           string
+		pkScriptHex    string
+		wantScripthash string
+	}{
+		{
+			// P2PKH script for 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa
+			// (Satoshi's genesis address).
+			name:        "p2pkh genesis address",
+			pkScriptHex: "76a91462e907b15cbf27d5425399ebf6f0fb50ebb88f1888ac",
+			wantScripthash: "8b01df4e368ea28f8dc0423bcf7a4923" +
+				"e3a12d307c875e47a0cfbf90b5c39161",
+		},
+		{
+			// P2WPKH script for
+			// bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4.
+			name:        "p2wpkh script",
+			pkScriptHex: "0014751e76e8199196d454941c45d1b3a323f1433bd6",
+			wantScripthash: "9623df75239b5daa7f5f03042d325b51" +
+				"498c4bb7059c7748b17049bf96f73888",
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			pkScript, err := hex.DecodeString(tc.pkScriptHex)
+			require.NoError(t, err)
+
+			scripthash := ScripthashFromScript(pkScript)
+			require.Equal(t, tc.wantScripthash, scripthash)
+		})
+	}
+}
+
+// TestScripthashFromAddress tests the conversion of a Bitcoin address to a
+// scripthash.
+func TestScripthashFromAddress(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name           string
+		address        string
+		params         *chaincfg.Params
+		wantScripthash string
+		wantErr        bool
+	}{
+		{
+			// Satoshi's genesis address.
+			name:    "mainnet p2pkh",
+			address: "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa",
+			params:  &chaincfg.MainNetParams,
+			wantScripthash: "8b01df4e368ea28f8dc0423bcf7a4923" +
+				"e3a12d307c875e47a0cfbf90b5c39161",
+			wantErr: false,
+		},
+		{
+			// Native segwit address.
+			name:    "mainnet p2wpkh",
+			address: "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4",
+			params:  &chaincfg.MainNetParams,
+			wantScripthash: "9623df75239b5daa7f5f03042d325b51" +
+				"498c4bb7059c7748b17049bf96f73888",
+			wantErr: false,
+		},
+		{
+			name:    "invalid address",
+			address: "invalid_address",
+			params:  &chaincfg.MainNetParams,
+			wantErr: true,
+		},
+		{
+			// Testnet P2PKH address on mainnet params should fail.
+			name:    "wrong network base58",
+			address: "mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn",
+			params:  &chaincfg.MainNetParams,
+			wantErr: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			scripthash, err := ScripthashFromAddress(
+				tc.address, tc.params,
+			)
+
+			if tc.wantErr {
+				require.Error(t, err)
+				return
+			}
+
+			require.NoError(t, err)
+			require.Equal(t, tc.wantScripthash, scripthash)
+		})
+	}
+}
+
+// TestReverseBytes tests the ReverseBytes utility function.
+func TestReverseBytes(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name  string
+		input []byte
+		want  []byte
+	}{
+		{
+			name:  "empty",
+			input: []byte{},
+			want:  []byte{},
+		},
+		{
+			name:  "single byte",
+			input: []byte{0x01},
+			want:  []byte{0x01},
+		},
+		{
+			name:  "multiple bytes",
+			input: []byte{0x01, 0x02, 0x03, 0x04},
+			want:  []byte{0x04, 0x03, 0x02, 0x01},
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			// Make a copy since ReverseBytes modifies in place.
+			input := make([]byte, len(tc.input))
+			copy(input, tc.input)
+
+			result := ReverseBytes(input)
+			require.Equal(t, tc.want, result)
+		})
+	}
+}
+
+// TestReversedHash tests the ReversedHash utility function.
+func TestReversedHash(t *testing.T) {
+	t.Parallel()
+
+	input := []byte{0x01, 0x02, 0x03, 0x04}
+	want := []byte{0x04, 0x03, 0x02, 0x01}
+
+	result := ReversedHash(input)
+	require.Equal(t, want, result)
+
+	// Verify that the original input was not modified.
+	require.Equal(t, []byte{0x01, 0x02, 0x03, 0x04}, input)
+}
diff --git a/lncfg/chain.go b/lncfg/chain.go
index 3e6fdfdd868..caaa2162b28 100644
--- a/lncfg/chain.go
+++ b/lncfg/chain.go
@@ -13,7 +13,7 @@ type Chain struct {
 	Active   bool   `long:"active" description:"DEPRECATED: If the chain should be active or not. This field is now ignored since only the Bitcoin chain is supported" hidden:"true"`
 	ChainDir string `long:"chaindir" description:"The directory to store the chain's data within."`
 
-	Node string `long:"node" description:"The blockchain interface to use." choice:"btcd" choice:"bitcoind" choice:"neutrino" choice:"nochainbackend"`
+	Node string `long:"node" description:"The blockchain interface to use." choice:"btcd" choice:"bitcoind" choice:"neutrino" choice:"esplora" choice:"nochainbackend"`
 
 	MainNet  bool `long:"mainnet" description:"Use the main network"`
 	TestNet3 bool `long:"testnet" description:"Use the test network"`
diff --git a/lncfg/esplora.go b/lncfg/esplora.go
new file mode 100644
index 00000000000..f3144160cc9
--- /dev/null
+++ b/lncfg/esplora.go
@@ -0,0 +1,80 @@
+package lncfg
+
+import "time"
+
+const (
+	// DefaultEsploraPollInterval is the default interval for polling
+	// the Esplora API for new blocks.
+	DefaultEsploraPollInterval = 10 * time.Second
+
+	// DefaultEsploraRequestTimeout is the default timeout for HTTP
+	// requests to the Esplora API.
+	DefaultEsploraRequestTimeout = 30 * time.Second
+
+	// DefaultEsploraMaxRetries is the default number of times to retry
+	// a failed request before giving up.
+	DefaultEsploraMaxRetries = 3
+
+	// DefaultGapLimit is the default gap limit for address scanning.
+	// This follows BIP-44 which specifies 20 consecutive unused addresses
+	// as the stopping point for address discovery.
+	DefaultGapLimit = 20
+
+	// DefaultAddressBatchSize is the default number of addresses to query
+	// concurrently when scanning with gap limit.
+	DefaultAddressBatchSize = 10
+)
+
+// Esplora holds the configuration options for the daemon's connection to
+// an Esplora HTTP API server (e.g., mempool.space, blockstream.info, or
+// a local electrs/mempool instance).
+//
+//nolint:ll
+type Esplora struct {
+	// URL is the base URL of the Esplora API to connect to. There is no
+	// default; it must be set explicitly when this backend is selected.
+	// Examples:
+	//   - http://localhost:3002 (local electrs/mempool)
+	//   - https://blockstream.info/api (Blockstream mainnet)
+	//   - https://mempool.space/api (mempool.space mainnet)
+	//   - https://mempool.space/testnet/api (mempool.space testnet)
+	URL string `long:"url" description:"The base URL of the Esplora API (e.g., http://localhost:3002)"`
+
+	// RequestTimeout is the timeout for HTTP requests sent to the Esplora
+	// API.
+	RequestTimeout time.Duration `long:"requesttimeout" description:"Timeout for HTTP requests to the Esplora API."`
+
+	// MaxRetries is the maximum number of times to retry a failed request.
+	MaxRetries int `long:"maxretries" description:"Maximum number of times to retry a failed request."`
+
+	// PollInterval is the interval at which to poll for new blocks.
+	// Since Esplora is HTTP-only, we need to poll rather than subscribe.
+	PollInterval time.Duration `long:"pollinterval" description:"Interval at which to poll for new blocks."`
+
+	// UseGapLimit enables gap limit optimization for wallet recovery.
+	// When enabled, address scanning stops after finding GapLimit
+	// consecutive unused addresses, dramatically improving recovery time.
+	UseGapLimit bool `long:"usegaplimit" description:"Enable gap limit optimization for wallet recovery (recommended)."`
+
+	// GapLimit is the number of consecutive unused addresses to scan
+	// before stopping. BIP-44 specifies 20 as the standard gap limit.
+	// Higher values may be needed for wallets with non-sequential usage.
+	GapLimit int `long:"gaplimit" description:"Number of consecutive unused addresses before stopping scan (default: 20)."`
+
+	// AddressBatchSize is the number of addresses to query concurrently
+	// when using gap limit scanning. Higher values increase speed but
+	// may trigger rate limiting on public APIs.
+	AddressBatchSize int `long:"addressbatchsize" description:"Number of addresses to query concurrently (default: 10)."`
+}
+
+// DefaultEsploraConfig returns a new Esplora config with default values
+// populated.
+func DefaultEsploraConfig() *Esplora {
+	return &Esplora{
+		RequestTimeout:   DefaultEsploraRequestTimeout,
+		MaxRetries:       DefaultEsploraMaxRetries,
+		PollInterval:     DefaultEsploraPollInterval,
+		UseGapLimit:      true, // Gap-limit scanning is on by default.
+		GapLimit:         DefaultGapLimit,
+		AddressBatchSize: DefaultAddressBatchSize,
+	}
+}
diff --git a/lnwallet/btcwallet/blockchain.go b/lnwallet/btcwallet/blockchain.go
index 25b51d5e067..55a7b5e430b 100644
--- a/lnwallet/btcwallet/blockchain.go
+++ b/lnwallet/btcwallet/blockchain.go
@@ -15,6 +15,16 @@ import (
 	"github.com/lightningnetwork/lnd/lnwallet"
 )
 
+// UtxoSource is an interface that wraps the GetUtxo method needed
+// from chain clients like Esplora. This interface allows us to avoid import
+// cycles between the btcwallet and chain client packages.
+type UtxoSource interface {
+	// GetUtxo returns the original output referenced by the passed
+	// outpoint if it is still unspent.
+	GetUtxo(op *wire.OutPoint, pkScript []byte, heightHint uint32,
+		cancel <-chan struct{}) (*wire.TxOut, error)
+}
+
 var (
 	// ErrOutputSpent is returned by the GetUtxo method if the target output
 	// for lookup has already been spent.
@@ -123,6 +133,13 @@ func (b *BtcWallet) GetUtxo(op *wire.OutPoint, pkScript []byte,
 		}, nil
 
 	default:
+		// Check if the backend implements UtxoSource interface.
+ // This allows chain clients like Esplora to be used without + // creating an import cycle. + if utxoBackend, ok := b.chain.(UtxoSource); ok { + return utxoBackend.GetUtxo(op, pkScript, heightHint, cancel) + } + return nil, fmt.Errorf("unknown backend") } } diff --git a/lnwallet/btcwallet/btcwallet.go b/lnwallet/btcwallet/btcwallet.go index a29139dbab9..d33e06f4879 100644 --- a/lnwallet/btcwallet/btcwallet.go +++ b/lnwallet/btcwallet/btcwallet.go @@ -1149,9 +1149,10 @@ func mapRpcclientError(err error) error { // already published to the network (either in the mempool or chain) no error // will be returned. func (b *BtcWallet) PublishTransaction(tx *wire.MsgTx, label string) error { - // For neutrino backend there's no mempool, so we return early by - // publishing the transaction. - if b.chain.BackEnd() == "neutrino" { + // For neutrino and esplora backends there's no mempool access, so we + // return early by publishing the transaction. + backEnd := b.chain.BackEnd() + if backEnd == "neutrino" || backEnd == "esplora" { err := b.wallet.PublishTransaction(tx, label) return mapRpcclientError(err) @@ -1821,6 +1822,13 @@ func (b *BtcWallet) RemoveDescendants(tx *wire.MsgTx) error { // CheckMempoolAcceptance is a wrapper around `TestMempoolAccept` which checks // the mempool acceptance of a transaction. func (b *BtcWallet) CheckMempoolAcceptance(tx *wire.MsgTx) error { + // For esplora backends there's no mempool access, so we + // skip the mempool acceptance check. + backEnd := b.chain.BackEnd() + if backEnd == "esplora" { + return nil + } + // Use a max feerate of 0 means the default value will be used when // testing mempool acceptance. The default max feerate is 0.10 BTC/kvb, // or 10,000 sat/vb. 
diff --git a/lnwallet/btcwallet/btcwallet_test.go b/lnwallet/btcwallet/btcwallet_test.go index c5bd8905a82..b161c85ab0b 100644 --- a/lnwallet/btcwallet/btcwallet_test.go +++ b/lnwallet/btcwallet/btcwallet_test.go @@ -158,6 +158,9 @@ func TestCheckMempoolAcceptance(t *testing.T) { chain: mockChain, } + // Mock BackEnd to return "bitcoind" so the mempool check runs. + mockChain.On("BackEnd").Return("bitcoind") + // Assert that when the chain backend doesn't support // `TestMempoolAccept`, an error is returned. // diff --git a/log.go b/log.go index 5f80bb7f588..50fd88aaeb1 100644 --- a/log.go +++ b/log.go @@ -11,6 +11,7 @@ import ( "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/chainio" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/chainntnfs/esploranotify" "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/chanbackup" @@ -20,6 +21,7 @@ import ( "github.com/lightningnetwork/lnd/cluster" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/discovery" + "github.com/lightningnetwork/lnd/esplora" "github.com/lightningnetwork/lnd/funding" "github.com/lightningnetwork/lnd/graph" graphdb "github.com/lightningnetwork/lnd/graph/db" @@ -214,6 +216,8 @@ func SetupLoggers(root *build.SubLoggerManager, interceptor signal.Interceptor) ) AddSubLogger(root, onionmessage.Subsystem, interceptor, onionmessage.UseLogger) + AddSubLogger(root, esplora.Subsystem, interceptor, esplora.UseLogger) + AddSubLogger(root, esploranotify.Subsystem, interceptor, esploranotify.UseLogger) } // AddSubLogger is a helper method to conveniently create and register the diff --git a/routing/chainview/esplora.go b/routing/chainview/esplora.go new file mode 100644 index 00000000000..93f2097d15b --- /dev/null +++ b/routing/chainview/esplora.go @@ -0,0 +1,585 @@ +package chainview + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + 
"github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/esplora" + graphdb "github.com/lightningnetwork/lnd/graph/db" +) + +// EsploraFilteredChainView is an implementation of the FilteredChainView +// interface which is backed by an Esplora HTTP API connection. It uses +// scripthash queries to monitor for spends of watched outputs. +type EsploraFilteredChainView struct { + started int32 // To be used atomically. + stopped int32 // To be used atomically. + + // bestHeight is the height of the latest block added to the + // blockQueue. It is used to determine up to what height we would + // need to rescan in case of a filter update. + bestHeightMtx sync.Mutex + bestHeight uint32 + + // client is the Esplora client used for all API operations. + client *esplora.Client + + // subscriptionID is the ID of our block notification subscription. + subscriptionID uint64 + + // blockEventQueue is the ordered queue used to keep the order of + // connected and disconnected blocks sent to the reader of the + // chainView. + blockQueue *blockEventQueue + + // filterUpdates is a channel in which updates to the utxo filter + // attached to this instance are sent over. + filterUpdates chan esploraFilterUpdate + + // chainFilter is the set of utxo's that we're currently watching + // spends for within the chain. Maps outpoint to funding pkScript. + filterMtx sync.RWMutex + chainFilter map[wire.OutPoint][]byte + + // scripthashToOutpoint maps scripthashes to their corresponding + // outpoints for efficient lookup when we receive notifications. + scripthashToOutpoint map[string]wire.OutPoint + + // filterBlockReqs is a channel in which requests to filter select + // blocks will be sent over. + filterBlockReqs chan *filterBlockReq + + quit chan struct{} + wg sync.WaitGroup +} + +// A compile time check to ensure EsploraFilteredChainView implements the +// chainview.FilteredChainView. 
+var _ FilteredChainView = (*EsploraFilteredChainView)(nil) + +// esploraFilterUpdate is a message sent to the chainFilterer to update the +// current chainFilter state. +type esploraFilterUpdate struct { + newUtxos []graphdb.EdgePoint + updateHeight uint32 +} + +// NewEsploraFilteredChainView creates a new instance of the +// EsploraFilteredChainView which is connected to an active Esplora client. +// +// NOTE: The client should already be started and connected before being +// passed into this function. +func NewEsploraFilteredChainView( + client *esplora.Client) (*EsploraFilteredChainView, error) { + + return &EsploraFilteredChainView{ + client: client, + blockQueue: newBlockEventQueue(), + filterUpdates: make(chan esploraFilterUpdate), + chainFilter: make(map[wire.OutPoint][]byte), + scripthashToOutpoint: make(map[string]wire.OutPoint), + filterBlockReqs: make(chan *filterBlockReq), + quit: make(chan struct{}), + }, nil +} + +// Start kicks off the FilteredChainView implementation. This function must be +// called before any calls to UpdateFilter can be processed. +// +// NOTE: This is part of the FilteredChainView interface. +func (e *EsploraFilteredChainView) Start() error { + // Already started? + if atomic.AddInt32(&e.started, 1) != 1 { + return nil + } + + log.Infof("EsploraFilteredChainView starting") + + // Ensure the Esplora client is connected. + if !e.client.IsConnected() { + return fmt.Errorf("esplora client not connected") + } + + // Get the current best block height. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + tipHeight, err := e.client.GetTipHeight(ctx) + if err != nil { + return fmt.Errorf("unable to get tip height: %w", err) + } + + e.bestHeightMtx.Lock() + e.bestHeight = uint32(tipHeight) + e.bestHeightMtx.Unlock() + + log.Debugf("EsploraFilteredChainView initial height: %d", tipHeight) + + e.blockQueue.Start() + + // Start the main goroutines. 
+ e.wg.Add(2) + go e.blockNotificationHandler() + go e.chainFilterer() + + return nil +} + +// Stop stops all goroutines which we launched by the prior call to the Start +// method. +// +// NOTE: This is part of the FilteredChainView interface. +func (e *EsploraFilteredChainView) Stop() error { + log.Debug("EsploraFilteredChainView stopping") + defer log.Debug("EsploraFilteredChainView stopped") + + // Already shutting down? + if atomic.AddInt32(&e.stopped, 1) != 1 { + return nil + } + + e.blockQueue.Stop() + + close(e.quit) + e.wg.Wait() + + return nil +} + +// blockNotificationHandler handles incoming block notifications from +// the Esplora client and dispatches appropriate events. +func (e *EsploraFilteredChainView) blockNotificationHandler() { + defer e.wg.Done() + + // Subscribe to block notifications from the client. + blockNotifs, subID := e.client.Subscribe() + e.subscriptionID = subID + + defer e.client.Unsubscribe(subID) + + for { + select { + case blockInfo, ok := <-blockNotifs: + if !ok { + log.Warn("Block notification channel closed") + return + } + + if blockInfo == nil { + continue + } + + e.handleBlockConnected(blockInfo) + + case <-e.quit: + return + } + } +} + +// handleBlockConnected processes a new block notification, filters +// for relevant transactions, and dispatches the filtered block event. +func (e *EsploraFilteredChainView) handleBlockConnected( + blockInfo *esplora.BlockInfo) { + + blockHeight := uint32(blockInfo.Height) + + e.bestHeightMtx.Lock() + prevBestHeight := e.bestHeight + e.bestHeightMtx.Unlock() + + // Check for reorg - if the new height is less than or equal to what + // we've seen, we may have a reorg situation. + if blockHeight <= prevBestHeight && blockHeight > 0 { + e.handlePotentialReorg(blockHeight, prevBestHeight) + } + + // Parse block hash. 
+ blockHash, err := chainhash.NewHashFromStr(blockInfo.ID) + if err != nil { + log.Errorf("Failed to parse block hash %s: %v", + blockInfo.ID, err) + return + } + + // Filter the block for transactions that spend our watched outputs. + filteredTxns := e.filterBlockTransactions(blockHeight) + + // Update best height. + e.bestHeightMtx.Lock() + e.bestHeight = blockHeight + e.bestHeightMtx.Unlock() + + // Create and dispatch the filtered block. + filteredBlock := &FilteredBlock{ + Hash: *blockHash, + Height: blockHeight, + Transactions: filteredTxns, + } + + e.blockQueue.Add(&blockEvent{ + eventType: connected, + block: filteredBlock, + }) +} + +// handlePotentialReorg handles potential chain reorganizations by sending +// disconnected block events for blocks that are no longer on the main chain. +func (e *EsploraFilteredChainView) handlePotentialReorg(newHeight, + prevHeight uint32) { + + log.Debugf("Potential reorg detected: new height %d, prev height %d", + newHeight, prevHeight) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + // Send disconnected events for blocks from prevHeight down to + // newHeight. + for h := prevHeight; h >= newHeight; h-- { + hashStr, err := e.client.GetBlockHashByHeight(ctx, int64(h)) + if err != nil { + log.Warnf("Failed to get hash for disconnected "+ + "block %d: %v", h, err) + continue + } + + blockHash, err := chainhash.NewHashFromStr(hashStr) + if err != nil { + log.Warnf("Failed to parse block hash: %v", err) + continue + } + + disconnectedBlock := &FilteredBlock{ + Hash: *blockHash, + Height: h, + } + + e.blockQueue.Add(&blockEvent{ + eventType: disconnected, + block: disconnectedBlock, + }) + } +} + +// filterBlockTransactions scans the watched outputs to find any that were +// spent in the given block height. It fetches all block transactions and +// scans them locally, which is more efficient than making per-outpoint API +// calls when there are many watched outpoints. 
+func (e *EsploraFilteredChainView) filterBlockTransactions( + blockHeight uint32) []*wire.MsgTx { + + e.filterMtx.RLock() + if len(e.chainFilter) == 0 { + e.filterMtx.RUnlock() + return nil + } + + // Copy the current filter to avoid holding the lock during API calls. + watchedOutpoints := make(map[wire.OutPoint][]byte) + for op, script := range e.chainFilter { + watchedOutpoints[op] = script + } + e.filterMtx.RUnlock() + + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + + // Get block hash for this height. + blockHashStr, err := e.client.GetBlockHashByHeight(ctx, int64(blockHeight)) + if err != nil { + log.Errorf("Failed to get block hash at height %d: %v", + blockHeight, err) + return nil + } + + // Fetch all transactions in the block. This is more efficient than + // making individual GetTxOutSpend calls for each watched outpoint, + // especially for nodes with many channels. + txInfos, err := e.client.GetBlockTxs(ctx, blockHashStr) + if err != nil { + log.Errorf("Failed to get block transactions at height %d: %v", + blockHeight, err) + return nil + } + + var spentOutpoints []wire.OutPoint + matchedTxIDs := make(map[string]struct{}) + + // Scan all transactions for inputs that spend watched outpoints. + for _, txInfo := range txInfos { + for _, vin := range txInfo.Vin { + if vin.IsCoinbase { + continue + } + + // Parse the previous outpoint being spent by this input. + prevHash, err := chainhash.NewHashFromStr(vin.TxID) + if err != nil { + continue + } + + prevOutpoint := wire.OutPoint{ + Hash: *prevHash, + Index: vin.Vout, + } + + // Check if this input spends a watched outpoint. + if _, watched := watchedOutpoints[prevOutpoint]; watched { + // Track the spending transaction (avoid duplicates + // if tx spends multiple watched outpoints). 
+ if _, exists := matchedTxIDs[txInfo.TxID]; !exists { + matchedTxIDs[txInfo.TxID] = struct{}{} + } + spentOutpoints = append(spentOutpoints, prevOutpoint) + } + } + } + + // Fetch the raw transactions for matches. + var filteredTxns []*wire.MsgTx + for txid := range matchedTxIDs { + tx, err := e.client.GetRawTransactionMsgTx(ctx, txid) + if err != nil { + log.Debugf("Failed to get spending tx %s: %v", txid, err) + continue + } + filteredTxns = append(filteredTxns, tx) + } + + // Remove spent outpoints from the filter. + if len(spentOutpoints) > 0 { + e.filterMtx.Lock() + for _, op := range spentOutpoints { + pkScript := e.chainFilter[op] + delete(e.chainFilter, op) + + // Also remove from scripthash mapping. + if pkScript != nil { + sh := esplora.ScripthashFromScript(pkScript) + delete(e.scripthashToOutpoint, sh) + } + } + e.filterMtx.Unlock() + } + + return filteredTxns +} + +// chainFilterer is the primary goroutine which handles filter updates and +// block filtering requests. +func (e *EsploraFilteredChainView) chainFilterer() { + defer e.wg.Done() + + for { + select { + case update := <-e.filterUpdates: + e.handleFilterUpdate(update) + + case req := <-e.filterBlockReqs: + e.handleFilterBlockReq(req) + + case <-e.quit: + return + } + } +} + +// handleFilterUpdate processes a filter update by adding new outpoints to +// watch and rescanning if necessary. +func (e *EsploraFilteredChainView) handleFilterUpdate( + update esploraFilterUpdate) { + + log.Tracef("Updating chain filter with %d new UTXO's", + len(update.newUtxos)) + + // Add new outpoints to the filter. + e.filterMtx.Lock() + for _, edgePoint := range update.newUtxos { + e.chainFilter[edgePoint.OutPoint] = edgePoint.FundingPkScript + + // Also add to scripthash mapping. + sh := esplora.ScripthashFromScript(edgePoint.FundingPkScript) + e.scripthashToOutpoint[sh] = edgePoint.OutPoint + } + e.filterMtx.Unlock() + + // Check if we need to rescan for spends we might have missed. 
+ e.bestHeightMtx.Lock() + bestHeight := e.bestHeight + e.bestHeightMtx.Unlock() + + if update.updateHeight < bestHeight { + log.Debugf("Rescanning for filter update from height %d to %d", + update.updateHeight, bestHeight) + + ctx, cancel := context.WithTimeout( + context.Background(), 120*time.Second, + ) + defer cancel() + + // Check each new outpoint to see if it was already spent. + for _, edgePoint := range update.newUtxos { + outSpend, err := e.client.GetTxOutSpend( + ctx, edgePoint.OutPoint.Hash.String(), + edgePoint.OutPoint.Index, + ) + if err != nil { + log.Debugf("Failed to check outspend: %v", err) + continue + } + + if !outSpend.Spent || !outSpend.Status.Confirmed { + continue + } + + spendHeight := uint32(outSpend.Status.BlockHeight) + if spendHeight < update.updateHeight || + spendHeight > bestHeight { + continue + } + + // Fetch the spending transaction. + tx, err := e.client.GetRawTransactionMsgTx( + ctx, outSpend.TxID, + ) + if err != nil { + log.Debugf("Failed to get tx: %v", err) + continue + } + + // Get the block hash for this height. + blockHash, err := e.client.GetBlockHashByHeight( + ctx, int64(spendHeight), + ) + if err != nil { + log.Debugf("Failed to get block hash: %v", err) + continue + } + + hash, err := chainhash.NewHashFromStr(blockHash) + if err != nil { + continue + } + + // Send a filtered block for this spend. + filteredBlock := &FilteredBlock{ + Hash: *hash, + Height: spendHeight, + Transactions: []*wire.MsgTx{tx}, + } + + e.blockQueue.Add(&blockEvent{ + eventType: connected, + block: filteredBlock, + }) + + // Remove from filter. + e.filterMtx.Lock() + delete(e.chainFilter, edgePoint.OutPoint) + sh := esplora.ScripthashFromScript(edgePoint.FundingPkScript) + delete(e.scripthashToOutpoint, sh) + e.filterMtx.Unlock() + } + } +} + +// handleFilterBlockReq handles a request to filter a specific block. 
+func (e *EsploraFilteredChainView) handleFilterBlockReq(req *filterBlockReq) { + blockHash := req.blockHash + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + // Get block info to find the height. + blockInfo, err := e.client.GetBlockInfo(ctx, blockHash.String()) + if err != nil { + req.err <- fmt.Errorf("failed to get block info: %w", err) + return + } + + // Filter transactions at this block height. + filteredTxns := e.filterBlockTransactions(uint32(blockInfo.Height)) + + filteredBlock := &FilteredBlock{ + Hash: *blockHash, + Height: uint32(blockInfo.Height), + Transactions: filteredTxns, + } + + req.resp <- filteredBlock +} + +// FilterBlock takes a block hash and returns a FilteredBlock with any +// transactions that spend watched outputs. +// +// NOTE: This is part of the FilteredChainView interface. +func (e *EsploraFilteredChainView) FilterBlock( + blockHash *chainhash.Hash) (*FilteredBlock, error) { + + req := &filterBlockReq{ + blockHash: blockHash, + resp: make(chan *FilteredBlock, 1), + err: make(chan error, 1), + } + + select { + case e.filterBlockReqs <- req: + case <-e.quit: + return nil, fmt.Errorf("esplora chain view shutting down") + } + + select { + case filteredBlock := <-req.resp: + return filteredBlock, nil + + case err := <-req.err: + return nil, err + + case <-e.quit: + return nil, fmt.Errorf("esplora chain view shutting down") + } +} + +// UpdateFilter updates the UTXO filter which is to be consulted when creating +// FilteredBlocks to be sent to subscribed clients. +// +// NOTE: This is part of the FilteredChainView interface. 
+func (e *EsploraFilteredChainView) UpdateFilter(ops []graphdb.EdgePoint, + updateHeight uint32) error { + + select { + case e.filterUpdates <- esploraFilterUpdate{ + newUtxos: ops, + updateHeight: updateHeight, + }: + return nil + + case <-e.quit: + return fmt.Errorf("esplora chain view shutting down") + } +} + +// FilteredBlocks returns the channel that filtered blocks are to be sent +// over. Each time a block is connected to the end of a main chain, and +// passes the filter previously set via UpdateFilter(), a struct over the +// returned channel will be sent. +// +// NOTE: This is part of the FilteredChainView interface. +func (e *EsploraFilteredChainView) FilteredBlocks() <-chan *FilteredBlock { + return e.blockQueue.newBlocks +} + +// DisconnectedBlocks returns the channel that filtered blocks are to be sent +// over. Each time a block is disconnected from the end of the main chain, a +// struct over the returned channel will be sent. +// +// NOTE: This is part of the FilteredChainView interface. +func (e *EsploraFilteredChainView) DisconnectedBlocks() <-chan *FilteredBlock { + return e.blockQueue.staleBlocks +} diff --git a/sample-lnd.conf b/sample-lnd.conf index a487565afcd..15ef0c9708f 100644 --- a/sample-lnd.conf +++ b/sample-lnd.conf @@ -682,6 +682,7 @@ ; Example: ; bitcoin.node=bitcoind ; bitcoin.node=neutrino +; bitcoin.node=esplora ; The default number of confirmations a channel must have before it's considered ; open. We'll require any incoming channel requests to wait this many @@ -916,6 +917,43 @@ ; Neutrino is used. ; neutrino.validatechannels=false +[esplora] + +; The base URL of the Esplora API to connect to. This must be set when using +; esplora mode. 
+; Default:
+; esplora.url=
+;
+; Example for a local electrs or mempool instance:
+; esplora.url=http://localhost:3002
+;
+; Example for the Blockstream mainnet API:
+; esplora.url=https://blockstream.info/api
+;
+; Example for the mempool.space mainnet API:
+; esplora.url=https://mempool.space/api
+;
+; Example for the mempool.space testnet API:
+; esplora.url=https://mempool.space/testnet/api
+
+; Timeout for HTTP requests to the Esplora API.
+; esplora.requesttimeout=30s
+
+; Maximum number of times to retry a failed request before giving up.
+; esplora.maxretries=3
+
+; Interval at which to poll for new blocks. Since Esplora exposes an HTTP API
+; only, lnd must poll for new blocks rather than subscribe to push
+; notifications.
+; esplora.pollinterval=10s
+
+; Enable gap limit optimization for wallet recovery. When enabled, address
+; scanning stops after finding esplora.gaplimit consecutive unused addresses,
+; dramatically improving recovery time. Recommended for most users.
+; esplora.usegaplimit=true
+
+; Number of consecutive unused addresses to scan before stopping. BIP-44
+; specifies 20 as the standard gap limit. Higher values may be needed for
+; wallets with non-sequential address usage patterns.
+; esplora.gaplimit=20
+
+; Number of addresses to query concurrently when using gap limit scanning.
+; Higher values increase speed but may trigger rate limiting on public APIs.
+; esplora.addressbatchsize=10
+
 [autopilot]
 
 ; If the autopilot agent should be active or not. The autopilot agent will