Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -873,6 +873,9 @@ case class Commitments(params: ChannelParams,
// We always use the last commitment that was created, to make sure we never go back in time.
val latest = FullCommitment(params, changes, active.head.fundingTxIndex, active.head.firstRemoteCommitIndex, active.head.remoteFundingPubKey, active.head.localFundingStatus, active.head.remoteFundingStatus, active.head.localCommit, active.head.remoteCommit, active.head.nextRemoteCommit_opt)

// The active commitment with the highest fundingTxIndex whose local funding status is Locked (i.e. we consider its funding tx locked on our side).
val lastLocalLocked_opt: Option[Commitment] = active.filter(_.localFundingStatus.isInstanceOf[LocalFundingStatus.Locked]).sortBy(_.fundingTxIndex).lastOption
// The active commitment with the highest fundingTxIndex whose remote funding status is Locked (i.e. our peer told us they consider its funding tx locked).
val lastRemoteLocked_opt: Option[Commitment] = active.filter(c => c.remoteFundingStatus == RemoteFundingStatus.Locked).sortBy(_.fundingTxIndex).lastOption

def add(commitment: Commitment): Commitments = copy(active = commitment +: active)

// @formatter:off
Expand Down Expand Up @@ -1270,8 +1273,6 @@ case class Commitments(params: ChannelParams,
// This ensures that we only have to send splice_locked for the latest commitment instead of sending it for every commitment.
// A side-effect is that previous commitments that are implicitly locked don't necessarily have their status correctly set.
// That's why we look at locked commitments separately and then select the one with the oldest fundingTxIndex.
val lastLocalLocked_opt = active.find(_.localFundingStatus.isInstanceOf[LocalFundingStatus.Locked])
val lastRemoteLocked_opt = active.find(_.remoteFundingStatus == RemoteFundingStatus.Locked)
val lastLocked_opt = (lastLocalLocked_opt, lastRemoteLocked_opt) match {
// We select the locked commitment with the smaller value for fundingTxIndex, but both have to be defined.
// If both have the same fundingTxIndex, they must actually be the same commitment, because:
Expand All @@ -1280,13 +1281,13 @@ case class Commitments(params: ChannelParams,
// - we don't allow creating a splice on top of an unconfirmed transaction that has RBF attempts (because it
// would become invalid if another of the RBF attempts end up being confirmed)
case (Some(lastLocalLocked), Some(lastRemoteLocked)) => Some(Seq(lastLocalLocked, lastRemoteLocked).minBy(_.fundingTxIndex))
// Special case for the initial funding tx, we only require a local lock because channel_ready doesn't explicitly reference a funding tx.
// Special case for the initial funding tx, we only require a local lock because our peer may have never sent channel_ready.
case (Some(lastLocalLocked), None) if lastLocalLocked.fundingTxIndex == 0 => Some(lastLocalLocked)
case _ => None
}
lastLocked_opt match {
case Some(lastLocked) =>
// all commitments older than this one are inactive
// All commitments older than this one, and RBF alternatives, become inactive.
val inactive1 = active.filter(c => c.fundingTxId != lastLocked.fundingTxId && c.fundingTxIndex <= lastLocked.fundingTxIndex)
inactive1.foreach(c => log.info("deactivating commitment fundingTxIndex={} fundingTxId={}", c.fundingTxIndex, c.fundingTxId))
copy(
Expand Down
115 changes: 87 additions & 28 deletions eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
var announcementSigsStash = Map.empty[RealShortChannelId, AnnouncementSignatures]
// we record the announcement_signatures messages we already sent to avoid unnecessary retransmission
var announcementSigsSent = Set.empty[RealShortChannelId]
// we keep track of the splice_locked we sent after channel_reestablish and its funding tx index to avoid sending it again
private var spliceLockedSent = Map.empty[TxId, Long]

private def trimAnnouncementSigsStashIfNeeded(): Unit = {
if (announcementSigsStash.size >= 10) {
Expand All @@ -233,6 +235,17 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
}
}

/**
 * Keep the `spliceLockedSent` map bounded by evicting the entry with the oldest funding tx index.
 * Called each time an entry is added, so a single eviction per call keeps the size capped at 10.
 */
private def trimSpliceLockedSentIfNeeded(): Unit = {
  if (spliceLockedSent.size >= 10) {
    // We shouldn't store an unbounded number of splice_locked: on long-lived connections where we do a lot of splice
    // transactions, we only need to keep track of the most recent ones.
    // A single minBy pass is enough to find the oldest entry; no need to sort the whole map.
    val (oldestFundingTxId, _) = spliceLockedSent.minBy { case (_, fundingTxIndex) => fundingTxIndex }
    spliceLockedSent -= oldestFundingTxId
  }
}

val txPublisher = txPublisherFactory.spawnTxPublisher(context, remoteNodeId)

// this will be used to detect htlc timeouts
Expand Down Expand Up @@ -775,10 +788,15 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with

case Event(c: CurrentFeerates.BitcoinCore, d: DATA_NORMAL) => handleCurrentFeerate(c, d)

case Event(_: ChannelReady, _: DATA_NORMAL) =>
// This happens on reconnection, because channel_ready is sent again if the channel hasn't been used yet,
// otherwise we cannot be sure that it was correctly received before disconnecting.
stay()
case Event(_: ChannelReady, d: DATA_NORMAL) =>
// After a reconnection, if the channel hasn't been used yet, our peer cannot be sure we received their channel_ready
// so they will resend it. Their remote funding status must also be set to Locked if it wasn't already.
// NB: Their remote funding status will be stored when the commitment is next updated, or channel_ready will
// be sent again if a reconnection occurs first.
stay() using d.copy(commitments = d.commitments.copy(active = d.commitments.active.map {
case c if c.fundingTxIndex == 0 => c.copy(remoteFundingStatus = RemoteFundingStatus.Locked)
case c => c
}))

// Channels are publicly announced if both parties want it: we ignore this message if we don't want to announce the channel.
case Event(remoteAnnSigs: AnnouncementSignatures, d: DATA_NORMAL) if d.commitments.announceChannel =>
Expand Down Expand Up @@ -1341,11 +1359,13 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case Event(w: WatchPublishedTriggered, d: DATA_NORMAL) =>
val fundingStatus = LocalFundingStatus.ZeroconfPublishedFundingTx(w.tx, d.commitments.localFundingSigs(w.tx.txid), d.commitments.liquidityPurchase(w.tx.txid))
d.commitments.updateLocalFundingStatus(w.tx.txid, fundingStatus, d.lastAnnouncedFundingTxId_opt) match {
case Right((commitments1, _)) =>
case Right((commitments1, commitment)) =>
// This is a zero-conf channel, the min-depth isn't critical: we use the default.
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepth), delay_opt = None)
maybeEmitEventsPostSplice(d.aliases, d.commitments, commitments1, d.lastAnnouncement_opt)
maybeUpdateMaxHtlcAmount(d.channelUpdate.htlcMaximumMsat, commitments1)
spliceLockedSent += (commitment.fundingTxId -> commitment.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
stay() using d.copy(commitments = commitments1) storing() sending SpliceLocked(d.channelId, w.tx.txid)
case Left(_) => stay()
}
Expand All @@ -1356,7 +1376,11 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
// We check if this commitment was already locked before receiving the event (which happens when using 0-conf
// or for the initial funding transaction). If it was previously not locked, we must send splice_locked now.
val previouslyNotLocked = d.commitments.all.exists(c => c.fundingTxId == commitment.fundingTxId && c.localFundingStatus.isInstanceOf[LocalFundingStatus.NotLocked])
val spliceLocked_opt = if (previouslyNotLocked) Some(SpliceLocked(d.channelId, w.tx.txid)) else None
val spliceLocked_opt = if (previouslyNotLocked) {
spliceLockedSent += (commitment.fundingTxId -> commitment.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
Some(SpliceLocked(d.channelId, w.tx.txid))
} else None
// If the channel is public and we've received the remote splice_locked, we send our announcement_signatures
// in order to generate the channel_announcement.
val remoteLocked = commitment.fundingTxIndex == 0 || d.commitments.all.exists(c => c.fundingTxId == commitment.fundingTxId && c.remoteFundingStatus == RemoteFundingStatus.Locked)
Expand All @@ -1379,19 +1403,34 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case Event(msg: SpliceLocked, d: DATA_NORMAL) =>
d.commitments.updateRemoteFundingStatus(msg.fundingTxId, d.lastAnnouncedFundingTxId_opt) match {
case Right((commitments1, commitment)) =>
// If both peers have already sent splice_locked for this commitment, then we are receiving splice_locked
// again after a reconnection and must retransmit our splice_locked and new announcement_signatures. Nodes
// retransmit splice_locked after a reconnection when they have received splice_locked but NOT matching signatures
// before the last disconnect. If a matching splice_locked has already been sent since reconnecting, then do not
// retransmit splice_locked to avoid a loop.
// NB: It is important both nodes retransmit splice_locked after reconnecting to ensure new Taproot nonces
// are exchanged for channel announcements.
val isLatestLocked = d.commitments.lastLocalLocked_opt.exists(_.fundingTxId == msg.fundingTxId) && d.commitments.lastRemoteLocked_opt.exists(_.fundingTxId == msg.fundingTxId)
val spliceLocked_opt = if (d.commitments.announceChannel && isLatestLocked && !spliceLockedSent.contains(commitment.fundingTxId)) {
spliceLockedSent += (commitment.fundingTxId -> commitment.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
Some(SpliceLocked(d.channelId, commitment.fundingTxId))
} else {
None
}
// If the commitment is confirmed, we were waiting to receive the remote splice_locked before sending our announcement_signatures.
val localAnnSigs_opt = if (d.commitments.announceChannel) commitment.signAnnouncement(nodeParams, commitments1.params) else None
localAnnSigs_opt match {
case Some(localAnnSigs) =>
// The commitment was locked on our side and we were waiting to receive the remote splice_locked before sending our announcement_signatures.
val localAnnSigs_opt = commitment.signAnnouncement(nodeParams, commitments1.params) match {
case Some(localAnnSigs) if !announcementSigsSent.contains(localAnnSigs.shortChannelId) =>
announcementSigsSent += localAnnSigs.shortChannelId
// If we've already received the remote announcement_signatures, we're now ready to process them.
announcementSigsStash.get(localAnnSigs.shortChannelId).foreach(self ! _)
case None => // The channel is private or the commitment isn't locked on our side.
Some(localAnnSigs)
case Some(_) => None // We've already sent these announcement_signatures since the last reconnect.
case None => None // The channel is private or the commitment isn't locked on our side.
}
maybeEmitEventsPostSplice(d.aliases, d.commitments, commitments1, d.lastAnnouncement_opt)
maybeUpdateMaxHtlcAmount(d.channelUpdate.htlcMaximumMsat, commitments1)
stay() using d.copy(commitments = commitments1) storing() sending localAnnSigs_opt.toSeq
stay() using d.copy(commitments = commitments1) storing() sending spliceLocked_opt.toSeq ++ localAnnSigs_opt.toSeq
case Left(_) => stay()
}

Expand Down Expand Up @@ -2227,13 +2266,17 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
}
case _ => Set.empty
}
val lastFundingLockedTlvs: Set[ChannelReestablishTlv] =
d.commitments.lastLocalLocked_opt.map(c => ChannelReestablishTlv.MyCurrentFundingLockedTlv(c.fundingTxId)).toSet ++
d.commitments.lastRemoteLocked_opt.map(c => ChannelReestablishTlv.YourLastFundingLockedTlv(c.fundingTxId)).toSet

val channelReestablish = ChannelReestablish(
channelId = d.channelId,
nextLocalCommitmentNumber = d.commitments.localCommitIndex + 1,
nextRemoteRevocationNumber = d.commitments.remoteCommitIndex,
yourLastPerCommitmentSecret = PrivateKey(yourLastPerCommitmentSecret),
myCurrentPerCommitmentPoint = myCurrentPerCommitmentPoint,
tlvStream = TlvStream(rbfTlv)
tlvStream = TlvStream(rbfTlv ++ lastFundingLockedTlvs)
)
// we update local/remote connection-local global/local features, we don't persist it right now
val d1 = Helpers.updateFeatures(d, localInit, remoteInit)
Expand Down Expand Up @@ -2325,6 +2368,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
// re-send channel_ready if necessary
if (d.commitments.latest.fundingTxIndex == 0 && channelReestablish.nextLocalCommitmentNumber == 1 && d.commitments.localCommitIndex == 0) {
// If next_local_commitment_number is 1 in both the channel_reestablish it sent and received, then the node MUST retransmit channel_ready, otherwise it MUST NOT
// TODO: when the remote node enables option_splice we can use your_last_funding_locked to detect they did not receive our channel_ready.
log.debug("re-sending channelReady")
val channelKeyPath = keyManager.keyPath(d.commitments.params.localParams, d.commitments.params.channelConfig)
val nextPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, 1)
Expand Down Expand Up @@ -2371,25 +2415,39 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case None => d.spliceStatus
}

// re-send splice_locked (must come *after* potentially retransmitting tx_signatures)
// NB: there is a key difference between channel_ready and splice_confirmed:
// - channel_ready: a non-zero commitment index implies that both sides have seen the channel_ready
// - splice_confirmed: the commitment index can be updated as long as it is compatible with all splices, so
// we must keep sending our most recent splice_locked at each reconnection
val spliceLocked = d.commitments.active
.filter(c => c.fundingTxIndex > 0) // only consider splice txs
.collectFirst { case c if c.localFundingStatus.isInstanceOf[LocalFundingStatus.Locked] =>
log.debug("re-sending splice_locked for fundingTxId={}", c.fundingTxId)
SpliceLocked(d.channelId, c.fundingTxId)
}
sendQueue = sendQueue ++ spliceLocked
// Prune previous funding transactions and RBF attempts if we already sent splice_locked for the last funding
// transaction that is also locked by our counterparty; we either missed their splice_locked or it confirmed
// while disconnected.
val commitments1: Commitments = channelReestablish.myCurrentFundingLocked_opt
.flatMap(remoteFundingTxLocked => d.commitments.updateRemoteFundingStatus(remoteFundingTxLocked, d.lastAnnouncedFundingTxId_opt).toOption.map(_._1))
.getOrElse(d.commitments)
// We then clean up unsigned updates that haven't been received before the disconnection.
.discardUnsignedUpdates()

val spliceLocked_opt = commitments1.lastLocalLocked_opt match {
case None => None
// We only send splice_locked for splice transactions.
case Some(c) if c.fundingTxIndex == 0 => None
case Some(c) =>
// If our peer has not received our splice_locked, we retransmit it.
val notReceivedByRemote = !channelReestablish.yourLastFundingLocked_opt.contains(c.fundingTxId)
// If this is a public channel and we haven't announced the splice, we retransmit our splice_locked and
// will exchange announcement_signatures afterwards.
val notAnnouncedYet = commitments1.announceChannel && d.lastAnnouncement_opt.forall(ann => !c.shortChannelId_opt.contains(ann.shortChannelId))
if (notReceivedByRemote || notAnnouncedYet) {
log.debug("re-sending splice_locked for fundingTxId={}", c.fundingTxId)
spliceLockedSent += (c.fundingTxId -> c.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
Some(SpliceLocked(d.channelId, c.fundingTxId))
} else {
None
}
}
sendQueue = sendQueue ++ spliceLocked_opt.toSeq

// we may need to retransmit updates and/or commit_sig and/or revocation
sendQueue = sendQueue ++ syncSuccess.retransmit

// then we clean up unsigned updates
val commitments1 = d.commitments.discardUnsignedUpdates()

commitments1.remoteNextCommitInfo match {
case Left(_) =>
// we expect them to (re-)send the revocation immediately
Expand Down Expand Up @@ -2869,6 +2927,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
sigStash = Nil
announcementSigsStash = Map.empty
announcementSigsSent = Set.empty
spliceLockedSent = Map.empty[TxId, Long]
}

/*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
| . |
| . |
WAIT_FOR_DUAL_FUNDING_LOCKED | | WAIT_FOR_DUAL_FUNDING_LOCKED
| funding_locked funding_locked |
| channel_ready channel_ready |
|---------------- ---------------|
| \/ |
| /\ |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,14 @@ trait CommonFundingHandlers extends CommonHandlers {
val initialChannelUpdate = Announcements.makeChannelUpdate(nodeParams, remoteNodeId, scidForChannelUpdate, commitments.params, relayFees, Helpers.maxHtlcAmount(nodeParams, commitments), enable = true)
// We need to periodically re-send channel updates, otherwise channel will be considered stale and get pruned by network.
context.system.scheduler.scheduleWithFixedDelay(initialDelay = REFRESH_CHANNEL_UPDATE_INTERVAL, delay = REFRESH_CHANNEL_UPDATE_INTERVAL, receiver = self, message = BroadcastChannelUpdate(PeriodicRefresh))
val commitments1 = commitments.modify(_.remoteNextCommitInfo).setTo(Right(channelReady.nextPerCommitmentPoint))
val commitments1 = commitments.copy(
// Set the remote status for all initial funding commitments to Locked. If there are RBF attempts, only one can be confirmed locally.
active = commitments.active.map {
case c if c.fundingTxIndex == 0 => c.copy(remoteFundingStatus = RemoteFundingStatus.Locked)
case c => c
},
remoteNextCommitInfo = Right(channelReady.nextPerCommitmentPoint)
)
peer ! ChannelReadyForPayments(self, remoteNodeId, commitments.channelId, fundingTxIndex = 0)
DATA_NORMAL(commitments1, aliases1, None, initialChannelUpdate, None, None, None, SpliceStatus.NoSplice)
}
Expand Down
Loading