diff --git a/src/builder.rs b/src/builder.rs index 3d12ee103..dea270e97 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -55,13 +55,15 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{ - read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, - read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, - read_scorer, write_node_metrics, + read_closed_channels, read_event_queue, read_external_pathfinding_scores_from_cache, + read_network_graph, read_node_metrics, read_output_sweeper, read_payments, read_peer_info, + read_pending_payments, read_scorer, write_node_metrics, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ - self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + self, CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, }; @@ -76,9 +78,9 @@ use crate::peer_store::PeerStore; use crate::runtime::{Runtime, RuntimeSpawner}; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ - AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreRef, DynStoreWrapper, - GossipSync, Graph, KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, - PendingPaymentStore, SyncAndAsyncKVStore, + AsyncPersister, ChainMonitor, ChannelManager, ClosedChannelStore, DynStore, DynStoreRef, + DynStoreWrapper, GossipSync, Graph, KeysManager, MessageRouter, OnionMessenger, PaymentStore, + PeerManager, PendingPaymentStore, SyncAndAsyncKVStore, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -1267,12 +1269,13 @@ fn build_with_store_internal( let 
kv_store_ref = Arc::clone(&kv_store); let logger_ref = Arc::clone(&logger); - let (payment_store_res, node_metris_res, pending_payment_store_res) = + let (payment_store_res, node_metris_res, pending_payment_store_res, closed_channel_store_res) = runtime.block_on(async move { tokio::join!( read_payments(&*kv_store_ref, Arc::clone(&logger_ref)), read_node_metrics(&*kv_store_ref, Arc::clone(&logger_ref)), - read_pending_payments(&*kv_store_ref, Arc::clone(&logger_ref)) + read_pending_payments(&*kv_store_ref, Arc::clone(&logger_ref)), + read_closed_channels(&*kv_store_ref, Arc::clone(&logger_ref)), ) }); @@ -1303,6 +1306,20 @@ fn build_with_store_internal( }, }; + let closed_channel_store = match closed_channel_store_res { + Ok(channels) => Arc::new(ClosedChannelStore::new( + channels, + CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), + CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), + Arc::clone(&kv_store), + Arc::clone(&logger), + )), + Err(e) => { + log_error!(logger, "Failed to read closed channel data from store: {}", e); + return Err(BuildError::ReadFailed); + }, + }; + let (chain_source, chain_tip_opt) = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); @@ -1996,6 +2013,7 @@ fn build_with_store_internal( scorer, peer_store, payment_store, + closed_channel_store, lnurl_auth, is_running, node_metrics, diff --git a/src/closed_channel.rs b/src/closed_channel.rs new file mode 100644 index 000000000..58b58f151 --- /dev/null +++ b/src/closed_channel.rs @@ -0,0 +1,131 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+use bitcoin::secp256k1::PublicKey;
+use bitcoin::OutPoint;
+use lightning::events::ClosureReason;
+use lightning::ln::msgs::DecodeError;
+use lightning::ln::types::ChannelId;
+use lightning::util::ser::{Readable, Writeable, Writer};
+use lightning::{_init_and_read_len_prefixed_tlv_fields, write_tlv_fields};
+
+use crate::data_store::{StorableObject, StorableObjectId, StorableObjectUpdate};
+use crate::hex_utils;
+use crate::types::UserChannelId;
+
+/// Details of a closed channel.
+///
+/// Returned by [`Node::list_closed_channels`].
+///
+/// [`Node::list_closed_channels`]: crate::Node::list_closed_channels
+#[derive(Clone, Debug, PartialEq, Eq)]
+#[cfg_attr(feature = "uniffi", derive(uniffi::Record))]
+pub struct ClosedChannelDetails {
+	/// The channel's ID at the time it was closed.
+	pub channel_id: ChannelId,
+	/// The local identifier of the channel.
+	pub user_channel_id: UserChannelId,
+	/// The node ID of the channel's counterparty.
+	pub counterparty_node_id: Option<PublicKey>,
+	/// The channel's funding transaction outpoint.
+	pub funding_txo: Option<OutPoint>,
+	/// The channel's capacity in satoshis.
+	pub channel_capacity_sats: Option<u64>,
+	/// Our local balance in millisatoshis at the time of channel closure.
+	pub last_local_balance_msat: Option<u64>,
+	/// Indicates whether we initiated the channel opening.
+	///
+	/// `true` if the channel was opened by us (outbound), `false` if opened by the counterparty
+	/// (inbound). This will be `false` for channels opened prior to this field being tracked.
+	pub is_outbound: bool,
+	/// The reason for the channel closure.
+	pub closure_reason: Option<ClosureReason>,
+	/// The timestamp, in seconds since start of the UNIX epoch, when the channel was closed.
+	pub closed_at: u64,
+}
+
+impl Writeable for ClosedChannelDetails {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), lightning::io::Error> {
+		write_tlv_fields!(writer, {
+			(0, self.channel_id, required),
+			(2, self.user_channel_id, required),
+			(4, self.counterparty_node_id, option),
+			(6, self.funding_txo, option),
+			(8, self.channel_capacity_sats, option),
+			(10, self.last_local_balance_msat, option),
+			(12, self.is_outbound, required),
+			(14, self.closure_reason, upgradable_option),
+			(16, self.closed_at, required),
+		});
+		Ok(())
+	}
+}
+
+impl Readable for ClosedChannelDetails {
+	fn read<R: lightning::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		let unix_time_secs = SystemTime::now()
+			.duration_since(UNIX_EPOCH)
+			.unwrap_or(Duration::from_secs(0))
+			.as_secs();
+		_init_and_read_len_prefixed_tlv_fields!(reader, {
+			(0, channel_id, required),
+			(2, user_channel_id, required),
+			(4, counterparty_node_id, option),
+			(6, funding_txo, option),
+			(8, channel_capacity_sats, option),
+			(10, last_local_balance_msat, option),
+			(12, is_outbound, required),
+			(14, closure_reason, upgradable_option),
+			(16, closed_at, (default_value, unix_time_secs)),
+		});
+		Ok(ClosedChannelDetails {
+			channel_id: channel_id.0.ok_or(DecodeError::InvalidValue)?,
+			user_channel_id: user_channel_id.0.ok_or(DecodeError::InvalidValue)?,
+			counterparty_node_id,
+			funding_txo,
+			channel_capacity_sats,
+			last_local_balance_msat,
+			is_outbound: is_outbound.0.ok_or(DecodeError::InvalidValue)?,
+			closure_reason,
+			closed_at: closed_at.0.ok_or(DecodeError::InvalidValue)?,
+		})
+	}
+}
+
+pub(crate) struct ClosedChannelDetailsUpdate(pub UserChannelId);
+
+impl StorableObjectUpdate<ClosedChannelDetails> for ClosedChannelDetailsUpdate {
+	fn id(&self) -> UserChannelId {
+		self.0
+	}
+}
+
+impl StorableObject for ClosedChannelDetails {
+	type Id = UserChannelId;
+	type Update = ClosedChannelDetailsUpdate;
+
+	fn id(&self) -> UserChannelId {
+		self.user_channel_id
+	}
+
+	fn update(&mut self, _update: Self::Update) -> bool {
+		// Closed channel
records are immutable once written. + false + } + + fn to_update(&self) -> Self::Update { + ClosedChannelDetailsUpdate(self.user_channel_id) + } +} + +impl StorableObjectId for UserChannelId { + fn encode_to_hex_str(&self) -> String { + hex_utils::to_string(&self.0.to_be_bytes()) + } +} diff --git a/src/event.rs b/src/event.rs index 3161daa2a..de5cfab0d 100644 --- a/src/event.rs +++ b/src/event.rs @@ -7,9 +7,10 @@ use core::future::Future; use core::task::{Poll, Waker}; -use std::collections::VecDeque; +use std::collections::{HashSet, VecDeque}; use std::ops::Deref; use std::sync::{Arc, Mutex}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::secp256k1::PublicKey; @@ -35,6 +36,7 @@ use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_types::payment::{PaymentHash, PaymentPreimage}; +use crate::closed_channel::ClosedChannelDetails; use crate::config::{may_announce_channel, Config}; use crate::connection::ConnectionManager; use crate::data_store::DataStoreUpdateResult; @@ -54,7 +56,8 @@ use crate::payment::store::{ }; use crate::runtime::Runtime; use crate::types::{ - CustomTlvRecord, DynStore, KeysManager, OnionMessenger, PaymentStore, Sweeper, Wallet, + ClosedChannelStore, CustomTlvRecord, DynStore, KeysManager, OnionMessenger, PaymentStore, + Sweeper, Wallet, }; use crate::{ hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, @@ -252,6 +255,18 @@ pub enum Event { counterparty_node_id: Option, /// This will be `None` for events serialized by LDK Node v0.2.1 and prior. reason: Option, + /// The channel's capacity in satoshis. + /// + /// This will be `None` for events serialized by LDK Node v0.8.0 and prior. + channel_capacity_sats: Option, + /// The channel's funding transaction outpoint. 
+ /// + /// This will be `None` for events serialized by LDK Node v0.8.0 and prior. + channel_funding_txo: Option, + /// Our local balance in millisatoshis at the time of channel closure. + /// + /// This will be `None` for events serialized by LDK Node v0.8.0 and prior. + last_local_balance_msat: Option, }, /// A channel splice is pending confirmation on-chain. SplicePending { @@ -314,6 +329,9 @@ impl_writeable_tlv_based_enum!(Event, (1, counterparty_node_id, option), (2, user_channel_id, required), (3, reason, upgradable_option), + (5, channel_capacity_sats, option), + (7, channel_funding_txo, option), + (9, last_local_balance_msat, option), }, (6, PaymentClaimable) => { (0, payment_hash, required), @@ -508,6 +526,10 @@ where liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>, + closed_channel_store: Arc, + // Tracks which user_channel_ids correspond to outbound channels. Populated at startup from + // list_channels() and updated on ChannelPending events. Consumed on ChannelClosed events. + outbound_channel_ids: Mutex>, keys_manager: Arc, runtime: Arc, logger: L, @@ -528,10 +550,23 @@ where output_sweeper: Arc, network_graph: Arc, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>, - keys_manager: Arc, static_invoice_store: Option, - onion_messenger: Arc, om_mailbox: Option>, - runtime: Arc, logger: L, config: Arc, + closed_channel_store: Arc, keys_manager: Arc, + static_invoice_store: Option, onion_messenger: Arc, + om_mailbox: Option>, runtime: Arc, logger: L, + config: Arc, ) -> Self { + // Seed outbound_channel_ids from currently open channels so we correctly classify channels + // that were already open when this node started. 
+ let outbound_channel_ids = { + let mut set = HashSet::new(); + for chan in channel_manager.list_channels() { + if chan.is_outbound { + set.insert(UserChannelId(chan.user_channel_id)); + } + } + Mutex::new(set) + }; + Self { event_queue, wallet, @@ -543,6 +578,8 @@ where liquidity_source, payment_store, peer_store, + closed_channel_store, + outbound_channel_ids, keys_manager, logger, runtime, @@ -1477,6 +1514,13 @@ where if let Some(pending_channel) = channels.into_iter().find(|c| c.channel_id == channel_id) { + if pending_channel.is_outbound { + self.outbound_channel_ids + .lock() + .unwrap() + .insert(UserChannelId(user_channel_id)); + } + if !pending_channel.is_outbound && self.peer_store.get_peer(&counterparty_node_id).is_none() { @@ -1552,15 +1596,53 @@ where reason, user_channel_id, counterparty_node_id, - .. + channel_capacity_sats, + channel_funding_txo, + last_local_balance_msat, } => { log_info!(self.logger, "Channel {} closed due to: {}", channel_id, reason); + let user_channel_id = UserChannelId(user_channel_id); + let is_outbound = + self.outbound_channel_ids.lock().unwrap().remove(&user_channel_id); + + let closed_at = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_secs(); + + let funding_txo = + channel_funding_txo.map(|op| OutPoint { txid: op.txid, vout: op.index as u32 }); + + let record = ClosedChannelDetails { + channel_id, + user_channel_id, + counterparty_node_id, + funding_txo, + channel_capacity_sats, + last_local_balance_msat, + is_outbound, + closure_reason: Some(reason.clone()), + closed_at, + }; + + if let Err(e) = self.closed_channel_store.insert(record) { + log_error!( + self.logger, + "Failed to persist closed channel {}: {}", + channel_id, + e + ); + } + let event = Event::ChannelClosed { channel_id, - user_channel_id: UserChannelId(user_channel_id), + user_channel_id, counterparty_node_id, reason: Some(reason), + channel_capacity_sats, + channel_funding_txo: funding_txo, + last_local_balance_msat, 
 				};
 
 				match self.event_queue.add_event(event).await {
diff --git a/src/io/mod.rs b/src/io/mod.rs
index e080d39f7..a7e98ef6e 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -27,6 +27,10 @@ pub(crate) const PEER_INFO_PERSISTENCE_KEY: &str = "peers";
 pub(crate) const PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = "payments";
 pub(crate) const PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 
+/// The closed channel information will be persisted under this prefix.
+pub(crate) const CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = "closed_channels";
+pub(crate) const CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+
 /// The node metrics will be persisted under this key.
 pub(crate) const NODE_METRICS_PRIMARY_NAMESPACE: &str = "";
 pub(crate) const NODE_METRICS_SECONDARY_NAMESPACE: &str = "";
diff --git a/src/io/utils.rs b/src/io/utils.rs
index eef71ec0b..82ca5d7ba 100644
--- a/src/io/utils.rs
+++ b/src/io/utils.rs
@@ -38,10 +38,13 @@ use lightning_types::string::PrintableString;
 
 use super::*;
 use crate::chain::ChainSource;
+use crate::closed_channel::ClosedChannelDetails;
 use crate::config::WALLET_KEYS_SEED_LEN;
 use crate::fee_estimator::OnchainFeeEstimator;
 use crate::io::{
-	NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE,
+	CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
+	CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE, NODE_METRICS_KEY,
+	NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE,
 };
 use crate::logger::{log_error, LdkLogger, Logger};
 use crate::payment::PendingPaymentDetails;
@@ -298,6 +301,82 @@ where
 	Ok(res)
 }
 
+pub(crate) async fn read_closed_channels<L: Deref>(
+	kv_store: &DynStore, logger: L,
+) -> Result<Vec<ClosedChannelDetails>, std::io::Error>
+where
+	L::Target: LdkLogger,
+{
+	let mut res = Vec::new();
+
+	let mut stored_keys = KVStore::list(
+		&*kv_store,
+		CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
+		CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
+	)
+	.await?;
+
+	const
BATCH_SIZE: usize = 50; + + let mut set = tokio::task::JoinSet::new(); + + // Fill JoinSet with tasks if possible + while set.len() < BATCH_SIZE && !stored_keys.is_empty() { + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + } + + while let Some(read_res) = set.join_next().await { + // Exit early if we get an IO error. + let reader = read_res + .map_err(|e| { + log_error!(logger, "Failed to read ClosedChannelDetails: {}", e); + set.abort_all(); + e + })? + .map_err(|e| { + log_error!(logger, "Failed to read ClosedChannelDetails: {}", e); + set.abort_all(); + e + })?; + + // Refill set for every finished future, if we still have something to do. + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + CLOSED_CHANNEL_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + CLOSED_CHANNEL_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + + // Handle result. + let channel = ClosedChannelDetails::read(&mut &*reader).map_err(|e| { + log_error!(logger, "Failed to deserialize ClosedChannelDetails: {}", e); + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to deserialize ClosedChannelDetails", + ) + })?; + res.push(channel); + } + + debug_assert!(set.is_empty()); + debug_assert!(stored_keys.is_empty()); + + Ok(res) +} + /// Read `OutputSweeper` state from the store. 
pub(crate) async fn read_output_sweeper( broadcaster: Arc, fee_estimator: Arc, diff --git a/src/lib.rs b/src/lib.rs index dd82c39f9..45e370779 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -83,6 +83,7 @@ mod balance; mod builder; mod chain; +pub(crate) mod closed_channel; pub mod config; mod connection; mod data_store; @@ -128,6 +129,7 @@ pub use builder::BuildError; #[cfg(not(feature = "uniffi"))] pub use builder::NodeBuilder as Builder; use chain::ChainSource; +pub use closed_channel::ClosedChannelDetails; use config::{ default_user_config, may_announce_channel, AsyncPaymentsRole, ChannelConfig, Config, LNURL_AUTH_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, @@ -173,9 +175,9 @@ use peer_store::{PeerInfo, PeerStore}; use runtime::Runtime; pub use tokio; use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, - HRNResolver, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, - Wallet, + Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, ClosedChannelStore, + DynStore, Graph, HRNResolver, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, + Scorer, Sweeper, Wallet, }; pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, SyncAndAsyncKVStore, UserChannelId}; pub use vss_client; @@ -233,6 +235,7 @@ pub struct Node { scorer: Arc>, peer_store: Arc>>, payment_store: Arc, + closed_channel_store: Arc, lnurl_auth: Arc, is_running: Arc>, node_metrics: Arc>, @@ -601,6 +604,7 @@ impl Node { self.liquidity_source.clone(), Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), + Arc::clone(&self.closed_channel_store), Arc::clone(&self.keys_manager), static_invoice_store, Arc::clone(&self.onion_messenger), @@ -1093,6 +1097,13 @@ impl Node { self.channel_manager.list_channels().into_iter().map(|c| c.into()).collect() } + /// Retrieve a list of closed channels. 
+ /// + /// Channels are added to this list when they are closed and will be persisted across restarts. + pub fn list_closed_channels(&self) -> Vec { + self.closed_channel_store.list_filter(|_| true) + } + /// Connect to a node on the peer-to-peer network. /// /// If `persist` is set to `true`, we'll remember the peer and reconnect to it on restart. diff --git a/src/types.rs b/src/types.rs index 3424d2779..e4b2d4ec2 100644 --- a/src/types.rs +++ b/src/types.rs @@ -32,6 +32,7 @@ use lightning_net_tokio::SocketDescriptor; use crate::chain::bitcoind::UtxoSourceClient; use crate::chain::ChainSource; +use crate::closed_channel::ClosedChannelDetails; use crate::config::ChannelConfig; use crate::data_store::DataStore; use crate::fee_estimator::OnchainFeeEstimator; @@ -353,7 +354,7 @@ pub(crate) type PaymentStore = DataStore>; /// A local, potentially user-provided, identifier of a channel. /// /// By default, this will be randomly generated for the user to ensure local uniqueness. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct UserChannelId(pub u128); impl Writeable for UserChannelId { @@ -663,3 +664,5 @@ impl From<&(u64, Vec)> for CustomTlvRecord { } pub(crate) type PendingPaymentStore = DataStore>; + +pub(crate) type ClosedChannelStore = DataStore>; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index be9e16189..5e8aa204e 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1481,6 +1481,23 @@ pub(crate) async fn do_channel_full_cycle( assert_eq!(node_a.next_event(), None); assert_eq!(node_b.next_event(), None); + // Check that the closed channel record was persisted. 
+ let closed_a = node_a.list_closed_channels(); + let closed_b = node_b.list_closed_channels(); + assert_eq!(closed_a.len(), 1); + assert_eq!(closed_b.len(), 1); + assert!(closed_a[0].channel_capacity_sats.is_some()); + assert!(closed_b[0].channel_capacity_sats.is_some()); + assert!(closed_a[0].is_outbound, "node_a opened the channel, should be outbound"); + assert!(!closed_b[0].is_outbound, "node_b received the channel, should be inbound"); + assert!(closed_a[0].closure_reason.is_some()); + assert!(closed_b[0].closure_reason.is_some()); + assert!(closed_a[0].funding_txo.is_some()); + assert!(closed_b[0].funding_txo.is_some()); + assert_eq!(closed_a[0].funding_txo, closed_b[0].funding_txo); + assert_eq!(closed_a[0].counterparty_node_id, Some(node_b.node_id())); + assert_eq!(closed_b[0].counterparty_node_id, Some(node_a.node_id())); + node_a.stop().unwrap(); println!("\nA stopped"); node_b.stop().unwrap(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index d2c057a16..ddcc64b2d 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -35,7 +35,7 @@ use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, UnifiedPaymentResult, }; -use ldk_node::{Builder, Event, NodeError}; +use ldk_node::{Builder, ClosedChannelDetails, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::routing::router::RouteParametersConfig; @@ -2957,3 +2957,98 @@ async fn splice_in_with_all_balance() { node_a.stop().unwrap(); node_b.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn closed_channel_history_persists_after_restart() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + + println!("== Node A =="); + let mut config_a = random_config(true); + config_a.store_type = 
TestStoreType::Sqlite; + + println!("\n== Node B =="); + let mut config_b = random_config(true); + config_b.store_type = TestStoreType::Sqlite; + + let channel_amount_sat = 1_000_000; + let premine_amount_sat = 2_125_000; + + let closed_channel_before: ClosedChannelDetails; + + { + let node_a = setup_node(&chain_source, config_a.clone()); + let node_b = setup_node(&chain_source, config_b.clone()); + + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let addr_b = node_b.onchain_payment().new_address().unwrap(); + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_a, addr_b], + Amount::from_sat(premine_amount_sat), + ) + .await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + // Open channel from A to B. + let funding_txo = open_channel(&node_a, &node_b, channel_amount_sat, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Cooperatively close. + let user_channel_id = node_a + .list_channels() + .into_iter() + .find(|c| c.counterparty_node_id == node_b.node_id()) + .map(|c| c.user_channel_id) + .expect("open channel not found"); + node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap(); + expect_event!(node_a, ChannelClosed); + expect_event!(node_b, ChannelClosed); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + // Verify the record is present before restart. 
+ let closed_a = node_a.list_closed_channels(); + assert_eq!(closed_a.len(), 1); + let record = &closed_a[0]; + assert_eq!(record.channel_capacity_sats, Some(channel_amount_sat)); + assert!(record.is_outbound); + assert_eq!(record.counterparty_node_id, Some(node_b.node_id())); + assert!(record.funding_txo.is_some()); + assert_eq!(record.funding_txo.unwrap().txid, funding_txo.txid); + assert!(record.closure_reason.is_some()); + + closed_channel_before = record.clone(); + + node_a.stop().unwrap(); + node_b.stop().unwrap(); + } + + // Restart node_a with the same config and verify the record survived. + println!("\nRestarting node A..."); + let restarted_node_a = setup_node(&chain_source, config_a); + + let closed_after = restarted_node_a.list_closed_channels(); + assert_eq!(closed_after.len(), 1, "closed channel record should survive restart"); + + let record = &closed_after[0]; + assert_eq!(record.channel_id, closed_channel_before.channel_id); + assert_eq!(record.user_channel_id, closed_channel_before.user_channel_id); + assert_eq!(record.channel_capacity_sats, closed_channel_before.channel_capacity_sats); + assert_eq!(record.funding_txo, closed_channel_before.funding_txo); + assert_eq!(record.counterparty_node_id, closed_channel_before.counterparty_node_id); + assert_eq!(record.is_outbound, closed_channel_before.is_outbound); + assert_eq!(record.closed_at, closed_channel_before.closed_at); + assert_eq!(record.closure_reason, closed_channel_before.closure_reason); + + restarted_node_a.stop().unwrap(); +}