// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Top level peer message handling and socket handling logic lives here.
//!
//! Instead of actually servicing sockets ourselves we require that you implement the
//! SocketDescriptor interface and use that to receive actions which you should perform on the
//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
//! call into the provided message handlers (probably a ChannelManager and P2PGossipSync) with
//! messages they should handle, and will encode and send any response messages.
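//!
//! As a rough sketch (all setup elided; `peer_manager`, `descriptor`, and
//! `bytes_read_from_socket` are assumed to exist and are illustrative only), a driving event
//! loop feeds socket data in and lets the handlers respond:
//!
//! ```ignore
//! // Feed bytes read off the socket into LDK. Ok(true) means reads on this socket should be
//! // paused until the next write_buffer_space_avail call for it.
//! let pause_read = peer_manager.read_event(&mut descriptor, &bytes_read_from_socket)?;
//! // Let the message handlers process what was read and send any responses.
//! peer_manager.process_events();
//! ```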
use bitcoin::constants::ChainHash;
use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
use crate::blinded_path::message::{AsyncPaymentsContext, DNSResolverContext, OffersContext};
use crate::sign::{NodeSigner, Recipient};
use crate::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::ln::types::ChannelId;
use crate::types::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, Init, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
use crate::ln::wire;
use crate::ln::wire::{Encode, Type};
use crate::onion_message::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc};
use crate::onion_message::dns_resolution::{DNSResolverMessageHandler, DNSResolverMessage, DNSSECProof, DNSSECQuery};
use crate::onion_message::messenger::{CustomOnionMessageHandler, Responder, ResponseInstruction, MessageSendInstructions};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::onion_message::packet::OnionMessageContents;
use crate::routing::gossip::{NodeId, NodeAlias};
use crate::util::atomic_counter::AtomicCounter;
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::string::PrintableString;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::io;
use crate::sync::{Mutex, MutexGuard, FairRwLock};
use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
use core::{cmp, hash, fmt, mem};
use core::ops::Deref;
use core::convert::Infallible;
#[cfg(not(c_bindings))]
use {
crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger},
crate::routing::gossip::{NetworkGraph, P2PGossipSync},
crate::sign::KeysManager,
crate::sync::Arc,
};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
use bitcoin::hashes::{HashEngine, Hash};
/// A handler provided to [`PeerManager`] for reading and handling custom messages.
///
/// [BOLT 1] specifies a custom message type range for use with experimental or application-specific
/// messages. `CustomMessageHandler` allows for user-defined handling of such types. See the
/// [`lightning_custom_message`] crate for tools useful in composing more than one custom handler.
///
/// [BOLT 1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
/// [`lightning_custom_message`]: https://docs.rs/lightning_custom_message/latest/lightning_custom_message
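///
/// As a rough sketch, a handler which echoes every custom message back to its sender might look
/// like the following. `MyMessage` is a hypothetical type implementing `Type` and `Writeable`,
/// and the required `CustomMessageReader` implementation which decodes it is elided:
///
/// ```ignore
/// struct EchoHandler { pending: Mutex<Vec<(PublicKey, MyMessage)>> }
/// impl CustomMessageHandler for EchoHandler {
///     fn handle_custom_message(&self, msg: MyMessage, sender_node_id: PublicKey) -> Result<(), LightningError> {
///         // Queue the message so get_and_clear_pending_msg returns it to the PeerManager.
///         self.pending.lock().unwrap().push((sender_node_id, msg));
///         Ok(())
///     }
///     fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, MyMessage)> {
///         core::mem::take(&mut *self.pending.lock().unwrap())
///     }
///     fn peer_connected(&self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
///     fn peer_disconnected(&self, _their_node_id: PublicKey) {}
///     fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
///     fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { InitFeatures::empty() }
/// }
/// ```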
pub trait CustomMessageHandler: wire::CustomMessageReader {
/// Handles the given message sent from `sender_node_id`, possibly producing messages for
/// [`CustomMessageHandler::get_and_clear_pending_msg`] to return and thus for [`PeerManager`]
/// to send.
fn handle_custom_message(&self, msg: Self::CustomMessage, sender_node_id: PublicKey) -> Result<(), LightningError>;
/// Returns the list of pending messages that were generated by the handler, clearing the list
/// in the process. Each message is paired with the node id of the intended recipient. If no
/// connection to the node exists, then the message is simply not sent.
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
/// Indicates a peer disconnected.
fn peer_disconnected(&self, their_node_id: PublicKey);
/// Handle a peer connecting.
///
/// May return an `Err(())` if the features the peer supports are not sufficient to communicate
/// with us. Implementors should be somewhat conservative about doing so, however, as other
/// message handlers may still wish to communicate with this peer.
fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>;
/// Gets the node feature flags which this handler itself supports. All available handlers are
/// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
/// which are broadcasted in our [`NodeAnnouncement`] message.
///
/// [`NodeAnnouncement`]: crate::ln::msgs::NodeAnnouncement
fn provided_node_features(&self) -> NodeFeatures;
/// Gets the init feature flags which should be sent to the given peer. All available handlers
/// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`]
/// which are sent in our [`Init`] message.
///
/// [`Init`]: crate::ln::msgs::Init
fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures;
}
/// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
/// or doing any processing. You can provide one of these as the route_handler in a MessageHandler.
pub struct IgnoringMessageHandler{}
impl MessageSendEventsProvider for IgnoringMessageHandler {
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() }
}
impl RoutingMessageHandler for IgnoringMessageHandler {
fn handle_node_announcement(&self, _their_node_id: Option<PublicKey>, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _their_node_id: Option<PublicKey>, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _their_node_id: Option<PublicKey>, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
fn get_next_channel_announcement(&self, _starting_point: u64) ->
Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<msgs::NodeAnnouncement> { None }
fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn handle_reply_channel_range(&self, _their_node_id: PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_query_short_channel_ids(&self, _their_node_id: PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
InitFeatures::empty()
}
fn processing_queue_high(&self) -> bool { false }
}
impl OnionMessageHandler for IgnoringMessageHandler {
fn handle_onion_message(&self, _their_node_id: PublicKey, _msg: &msgs::OnionMessage) {}
fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn peer_disconnected(&self, _their_node_id: PublicKey) {}
fn timer_tick_occurred(&self) {}
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
InitFeatures::empty()
}
}
impl OffersMessageHandler for IgnoringMessageHandler {
fn handle_message(&self, _message: OffersMessage, _context: Option<OffersContext>, _responder: Option<Responder>) -> Option<(OffersMessage, ResponseInstruction)> {
None
}
}
impl AsyncPaymentsMessageHandler for IgnoringMessageHandler {
fn handle_held_htlc_available(
&self, _message: HeldHtlcAvailable, _responder: Option<Responder>,
) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
None
}
fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, _context: AsyncPaymentsContext) {}
}
impl DNSResolverMessageHandler for IgnoringMessageHandler {
fn handle_dnssec_query(
&self, _message: DNSSECQuery, _responder: Option<Responder>,
) -> Option<(DNSResolverMessage, ResponseInstruction)> {
None
}
fn handle_dnssec_proof(&self, _message: DNSSECProof, _context: DNSResolverContext) {}
}
impl CustomOnionMessageHandler for IgnoringMessageHandler {
type CustomMessage = Infallible;
fn handle_custom_message(&self, _message: Infallible, _context: Option<Vec<u8>>, _responder: Option<Responder>) -> Option<(Infallible, ResponseInstruction)> {
// Since we always return `None` from the read method, this handler method should never be called.
unreachable!();
}
fn read_custom_message<R: io::Read>(&self, _msg_type: u64, _buffer: &mut R) -> Result<Option<Infallible>, msgs::DecodeError> where Self: Sized {
Ok(None)
}
fn release_pending_custom_messages(&self) -> Vec<(Infallible, MessageSendInstructions)> {
vec![]
}
}
impl OnionMessageContents for Infallible {
fn tlv_type(&self) -> u64 { unreachable!(); }
#[cfg(c_bindings)]
fn msg_type(&self) -> String { unreachable!(); }
#[cfg(not(c_bindings))]
fn msg_type(&self) -> &'static str { unreachable!(); }
}
impl Deref for IgnoringMessageHandler {
type Target = IgnoringMessageHandler;
fn deref(&self) -> &Self { self }
}
// Implement Type for Infallible. Note that it cannot be constructed, and thus you can never call
// a method that takes `self` on it.
impl wire::Type for Infallible {
fn type_id(&self) -> u16 {
unreachable!();
}
}
impl Writeable for Infallible {
fn write<W: Writer>(&self, _: &mut W) -> Result<(), io::Error> {
unreachable!();
}
}
impl wire::CustomMessageReader for IgnoringMessageHandler {
type CustomMessage = Infallible;
fn read<R: io::Read>(&self, _message_type: u16, _buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
Ok(None)
}
}
impl CustomMessageHandler for IgnoringMessageHandler {
fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: PublicKey) -> Result<(), LightningError> {
// Since we always return `None` from the read method, this handler method should never be called.
unreachable!();
}
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
fn peer_disconnected(&self, _their_node_id: PublicKey) {}
fn peer_connected(&self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
InitFeatures::empty()
}
}
/// A dummy struct which implements `ChannelMessageHandler` without having any channels.
/// You can provide one of these as the chan_handler in a MessageHandler.
pub struct ErroringMessageHandler {
message_queue: Mutex<Vec<MessageSendEvent>>
}
impl ErroringMessageHandler {
/// Constructs a new ErroringMessageHandler
pub fn new() -> Self {
Self { message_queue: Mutex::new(Vec::new()) }
}
fn push_error(&self, node_id: PublicKey, channel_id: ChannelId) {
self.message_queue.lock().unwrap().push(MessageSendEvent::HandleError {
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id, data: "We do not support channel messages, sorry.".to_owned() },
},
node_id,
});
}
}
impl MessageSendEventsProvider for ErroringMessageHandler {
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let mut res = Vec::new();
mem::swap(&mut res, &mut self.message_queue.lock().unwrap());
res
}
}
impl ChannelMessageHandler for ErroringMessageHandler {
// Any messages which are related to a specific channel generate an error message to let the
// peer know we don't care about channels.
fn handle_open_channel(&self, their_node_id: PublicKey, msg: &msgs::OpenChannel) {
ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_accept_channel(&self, their_node_id: PublicKey, msg: &msgs::AcceptChannel) {
ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_funding_created(&self, their_node_id: PublicKey, msg: &msgs::FundingCreated) {
ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
}
fn handle_funding_signed(&self, their_node_id: PublicKey, msg: &msgs::FundingSigned) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_channel_ready(&self, their_node_id: PublicKey, msg: &msgs::ChannelReady) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_shutdown(&self, their_node_id: PublicKey, msg: &msgs::Shutdown) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_closing_signed(&self, their_node_id: PublicKey, msg: &msgs::ClosingSigned) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_stfu(&self, their_node_id: PublicKey, msg: &msgs::Stfu) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
#[cfg(splicing)]
fn handle_splice_init(&self, their_node_id: PublicKey, msg: &msgs::SpliceInit) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
#[cfg(splicing)]
fn handle_splice_ack(&self, their_node_id: PublicKey, msg: &msgs::SpliceAck) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
#[cfg(splicing)]
fn handle_splice_locked(&self, their_node_id: PublicKey, msg: &msgs::SpliceLocked) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_update_add_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_update_fail_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_update_fail_malformed_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &msgs::CommitmentSigned) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_update_fee(&self, their_node_id: PublicKey, msg: &msgs::UpdateFee) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_announcement_signatures(&self, their_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &msgs::ChannelReestablish) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
fn handle_channel_update(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelUpdate) {}
fn peer_disconnected(&self, _their_node_id: PublicKey) {}
fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
fn handle_error(&self, _their_node_id: PublicKey, _msg: &msgs::ErrorMessage) {}
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
// Set a number of features which various nodes may require to talk to us. It's totally
// reasonable to indicate we "support" all kinds of channel features...we just reject all
// channels.
let mut features = InitFeatures::empty();
features.set_data_loss_protect_optional();
features.set_upfront_shutdown_script_optional();
features.set_variable_length_onion_optional();
features.set_static_remote_key_optional();
features.set_payment_secret_optional();
features.set_basic_mpp_optional();
features.set_wumbo_optional();
features.set_shutdown_any_segwit_optional();
features.set_dual_fund_optional();
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
features.set_route_blinding_optional();
features
}
fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
// We don't enforce any chains upon peer connection for `ErroringMessageHandler` and leave it up
// to users of `ErroringMessageHandler` to make decisions on network compatibility.
// There's not really any way to pull in specific networks here, and hardcoding can cause breakages.
None
}
fn handle_open_channel_v2(&self, their_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_accept_channel_v2(&self, their_node_id: PublicKey, msg: &msgs::AcceptChannelV2) {
ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_tx_add_input(&self, their_node_id: PublicKey, msg: &msgs::TxAddInput) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_add_output(&self, their_node_id: PublicKey, msg: &msgs::TxAddOutput) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_remove_input(&self, their_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_remove_output(&self, their_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_complete(&self, their_node_id: PublicKey, msg: &msgs::TxComplete) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_signatures(&self, their_node_id: PublicKey, msg: &msgs::TxSignatures) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_init_rbf(&self, their_node_id: PublicKey, msg: &msgs::TxInitRbf) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_ack_rbf(&self, their_node_id: PublicKey, msg: &msgs::TxAckRbf) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_tx_abort(&self, their_node_id: PublicKey, msg: &msgs::TxAbort) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn message_received(&self) {}
}
impl Deref for ErroringMessageHandler {
type Target = ErroringMessageHandler;
fn deref(&self) -> &Self { self }
}
/// Provides references to trait impls which handle different types of messages.
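///
/// For example, a gossip-only node might assemble one as in the following sketch (the handler
/// choices are illustrative; `gossip_sync` is assumed to be a `P2PGossipSync` built elsewhere):
///
/// ```ignore
/// let message_handler = MessageHandler {
///     // Reject any channel-related messages with an error.
///     chan_handler: ErroringMessageHandler::new(),
///     route_handler: &gossip_sync,
///     // Ignore onion messages and custom messages entirely.
///     onion_message_handler: IgnoringMessageHandler{},
///     custom_message_handler: IgnoringMessageHandler{},
/// };
/// ```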
pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref, CustomM: Deref> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
OM::Target: OnionMessageHandler,
CustomM::Target: CustomMessageHandler,
{
/// A message handler which handles messages specific to channels. Usually this is just a
/// [`ChannelManager`] object or an [`ErroringMessageHandler`].
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub chan_handler: CM,
/// A message handler which handles messages updating our knowledge of the network channel
/// graph. Usually this is just a [`P2PGossipSync`] object or an [`IgnoringMessageHandler`].
///
/// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
pub route_handler: RM,
/// A message handler which handles onion messages. This should generally be an
/// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
///
/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
pub onion_message_handler: OM,
/// A message handler which handles custom messages. The only LDK-provided implementation is
/// [`IgnoringMessageHandler`].
pub custom_message_handler: CustomM,
}
/// Provides an object which can be used to send data to and which uniquely identifies a connection
/// to a remote host. You will need to be able to generate multiple of these; they must implement
/// Eq and Hash to meet the PeerManager API.
///
/// For efficiency, [`Clone`] should be relatively cheap for this type.
///
/// Two descriptors may compare equal (by [`cmp::Eq`] and [`hash::Hash`]) as long as the original
/// has been disconnected, the [`PeerManager`] has been informed of the disconnection (either by it
/// having triggered the disconnection or a call to [`PeerManager::socket_disconnected`]), and no
/// further calls to the [`PeerManager`] related to the original socket occur. This allows you to
/// use a file descriptor for your SocketDescriptor directly, however for simplicity you may wish
/// to simply use another value which is guaranteed to be globally unique instead.
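///
/// A minimal sketch, assuming the actual socket I/O is driven elsewhere and keyed off a
/// globally-unique connection id (`my_io_layer` and its functions are illustrative only):
///
/// ```ignore
/// #[derive(Clone, PartialEq, Eq, Hash)]
/// struct MyDescriptor { connection_id: u64 }
/// impl SocketDescriptor for MyDescriptor {
///     fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
///         // Hand `data` to the I/O layer for this connection, returning how many bytes it
///         // accepted (possibly 0 if the socket has since disconnected).
///         my_io_layer::queue_write(self.connection_id, data)
///     }
///     fn disconnect_socket(&mut self) {
///         // Ask the I/O layer to close the underlying socket.
///         my_io_layer::close(self.connection_id);
///     }
/// }
/// ```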
pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
/// Attempts to send some data from the given slice to the peer.
///
/// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
/// Note that in the disconnected case, [`PeerManager::socket_disconnected`] must still be
/// called and further write attempts may occur until that time.
///
/// If the returned size is smaller than `data.len()`, a
/// [`PeerManager::write_buffer_space_avail`] call must be made the next time more data can be
/// written. Additionally, until a `send_data` event completes fully, no further
/// [`PeerManager::read_event`] calls should be made for the same peer! Because this is meant to
/// prevent denial-of-service issues, you should not read or buffer any data from the socket
/// until then.
///
/// If a [`PeerManager::read_event`] call on this descriptor had previously returned true
/// (indicating that read events should be paused to prevent DoS in the send buffer),
/// `resume_read` may be set indicating that read events on this descriptor should resume. A
/// `resume_read` of false carries no meaning, and should not cause any action.
fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize;
/// Disconnect the socket pointed to by this SocketDescriptor.
///
/// You do *not* need to call [`PeerManager::socket_disconnected`] with this socket after this
/// call (doing so is a noop).
fn disconnect_socket(&mut self);
}
/// Details of a connected peer as returned by [`PeerManager::list_peers`].
pub struct PeerDetails {
/// The node id of the peer.
///
/// For outbound connections, this [`PublicKey`] will be the same as the `their_node_id` parameter
/// passed in to [`PeerManager::new_outbound_connection`].
pub counterparty_node_id: PublicKey,
/// The socket address the peer provided in the initial handshake.
///
/// Will only be `Some` if an address had been previously provided to
/// [`PeerManager::new_outbound_connection`] or [`PeerManager::new_inbound_connection`].
pub socket_address: Option<SocketAddress>,
/// The features the peer provided in the initial handshake.
pub init_features: InitFeatures,
/// Indicates the direction of the peer connection.
///
/// Will be `true` for inbound connections, and `false` for outbound connections.
pub is_inbound_connection: bool,
}
/// Error for PeerManager errors. If you get one of these, you must disconnect the socket and
/// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
/// descriptor.
#[derive(Clone)]
pub struct PeerHandleError { }
impl fmt::Debug for PeerHandleError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
formatter.write_str("Peer Sent Invalid Data")
}
}
impl fmt::Display for PeerHandleError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
formatter.write_str("Peer Sent Invalid Data")
}
}
/// Internal enum for keeping track of the gossip syncing progress with a given peer.
enum InitSyncTracker {
/// Only sync ad-hoc gossip as it comes in, do not send historical gossip.
/// Upon receipt of a GossipTimestampFilter message, this is the default initial state if the
/// contained timestamp is less than 6 hours old.
NoSyncRequested,
/// Send historical gossip starting at the given channel id, which gets incremented as the
/// gossiping progresses.
/// Upon receipt of a GossipTimestampFilter message, this is the default initial state if the
/// contained timestamp is at least 6 hours old, and the initial channel id is set to 0.
ChannelsSyncing(u64),
/// Once the channel announcements and updates finish syncing, the node announcements are synced.
NodesSyncing(NodeId),
}
/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we stop
/// forwarding gossip messages to peers altogether.
const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
/// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
/// we have fewer than this many messages in the outbound buffer again.
/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
/// refilled as we send bytes.
const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 12;
/// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
/// the peer.
const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
/// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
/// the socket receive buffer before receiving the pong.
///
/// On a fairly old Arm64 board, with Linux defaults, this can take as long as 20 seconds, not
/// including any network delays, outbound traffic, or the same for messages from other peers.
///
/// Thus, to avoid needlessly disconnecting a peer, we allow a peer to take this many timer ticks
/// per connected peer to respond to a ping, as long as they send us at least one message during
/// each tick, ensuring we aren't actually just disconnected.
/// With a timer tick interval of ten seconds, this translates to about 40 seconds per connected
/// peer.
///
/// When we improve parallelism somewhat we should reduce this to e.g. this many timer ticks per
/// two connected peers, assuming most LDK-running systems have at least two cores.
const MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER: i8 = 4;
/// This is the minimum number of messages we expect a peer to be able to handle within one timer
/// tick. Once we have sent this many messages since the last ping, we send a ping right away to
/// ensure we don't just fill up our send buffer and leave the peer with too many messages to
/// process before the next ping.
///
/// Note that we continue responding to other messages even after we've sent this many messages, so
/// it's more of a general guideline used for gossip backfill (and gossip forwarding, times
/// [`FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO`]) than a hard limit.
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
struct Peer {
channel_encryptor: PeerChannelEncryptor,
/// We cache a `NodeId` here to avoid serializing peers' keys every time we forward gossip
/// messages in `PeerManager`. Use `Peer::set_their_node_id` to modify this field.
their_node_id: Option<(PublicKey, NodeId)>,
/// The features provided in the peer's [`msgs::Init`] message.
///
/// This is set only after we've processed the [`msgs::Init`] message and called relevant
/// `peer_connected` handler methods. Thus, this field is set *iff* we've finished our
/// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
/// check this).
their_features: Option<InitFeatures>,
their_socket_address: Option<SocketAddress>,
pending_outbound_buffer: VecDeque<Vec<u8>>,
pending_outbound_buffer_first_msg_offset: usize,
/// Queue gossip broadcasts separately from `pending_outbound_buffer` so we can easily
/// prioritize channel messages over them.
///
/// Note that these messages are *not* encrypted/MAC'd, and are only serialized.
gossip_broadcast_buffer: VecDeque<MessageBuf>,
awaiting_write_event: bool,
pending_read_buffer: Vec<u8>,
pending_read_buffer_pos: usize,
pending_read_is_header: bool,
sync_status: InitSyncTracker,
msgs_sent_since_pong: usize,
awaiting_pong_timer_tick_intervals: i64,
received_message_since_timer_tick: bool,
sent_gossip_timestamp_filter: bool,
/// Indicates we've received a `channel_announcement` since the last time we had
/// [`PeerManager::gossip_processing_backlogged`] set (or, really, that we've received a
/// `channel_announcement` at all - we set this unconditionally but unset it every time we
/// check if we're gossip-processing-backlogged).
received_channel_announce_since_backlogged: bool,
inbound_connection: bool,
}
impl Peer {
/// True after we've processed the [`msgs::Init`] message and called relevant `peer_connected`
/// handler methods. Thus, this implies we've finished our handshake and can talk to this peer
/// normally.
fn handshake_complete(&self) -> bool {
self.their_features.is_some()
}
/// Returns true if the channel announcements/updates for the given channel should be
/// forwarded to this peer.
/// If we are sending our routing table to this peer and have not yet sent channel
/// announcements/updates for the given channel_id, we will send them when we get to that point
/// in the sync, so we should not forward them now (that would result in duplicate updates). If
/// we've already sent the old versions, we should forward the update, and so return true here.
fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
if !self.handshake_complete() { return false; }
if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
!self.sent_gossip_timestamp_filter {
return false;
}
match self.sync_status {
InitSyncTracker::NoSyncRequested => true,
InitSyncTracker::ChannelsSyncing(i) => i < channel_id,
InitSyncTracker::NodesSyncing(_) => true,
}
}
/// Similar to the above, but for node announcements indexed by node_id.
fn should_forward_node_announcement(&self, node_id: NodeId) -> bool {
if !self.handshake_complete() { return false; }
if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
!self.sent_gossip_timestamp_filter {
return false;
}
match self.sync_status {
InitSyncTracker::NoSyncRequested => true,
InitSyncTracker::ChannelsSyncing(_) => false,
InitSyncTracker::NodesSyncing(sync_node_id) => sync_node_id.as_slice() < node_id.as_slice(),
}
}
/// Returns whether we should be reading bytes from this peer, based on whether its outbound
/// buffer still has space and we don't need to pause reads to get some writes out.
fn should_read(&mut self, gossip_processing_backlogged: bool) -> bool {
if !gossip_processing_backlogged {
self.received_channel_announce_since_backlogged = false;
}
self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE &&
(!gossip_processing_backlogged || !self.received_channel_announce_since_backlogged)
}
/// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's
/// outbound buffer. This is checked every time the peer's buffer may have been drained.
fn should_buffer_gossip_backfill(&self) -> bool {
self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
&& self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
&& self.handshake_complete()
}
/// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
/// every time the peer's buffer may have been drained.
fn should_buffer_onion_message(&self) -> bool {
self.pending_outbound_buffer.is_empty() && self.handshake_complete()
&& self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
}
/// Determines if we should push additional gossip broadcast messages onto a peer's outbound
/// buffer. This is checked every time the peer's buffer may have been drained.
fn should_buffer_gossip_broadcast(&self) -> bool {
self.pending_outbound_buffer.is_empty() && self.handshake_complete()
&& self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
}
/// Returns whether this peer's outbound buffers are full and we should drop gossip broadcasts.
fn buffer_full_drop_gossip_broadcast(&self) -> bool {
let total_outbound_buffered =
self.gossip_broadcast_buffer.len() + self.pending_outbound_buffer.len();
total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP ||
self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
}
fn set_their_node_id(&mut self, node_id: PublicKey) {
self.their_node_id = Some((node_id, NodeId::from_pubkey(&node_id)));
}
}
/// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions.
///
/// This is not exported to bindings users as type aliases aren't supported in most languages.
#[cfg(not(c_bindings))]
pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<
SD,
Arc<SimpleArcChannelManager<M, T, F, L>>,
Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, C, Arc<L>>>,
Arc<SimpleArcOnionMessenger<M, T, F, L>>,
Arc<L>,
IgnoringMessageHandler,
Arc<KeysManager>
>;
/// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
/// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
/// need a PeerManager with a static lifetime. You'll need a static lifetime in cases such as
/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// helps with issues such as long function definitions.
///
/// This is not exported to bindings users as type aliases aren't supported in most languages.
#[cfg(not(c_bindings))]
pub type SimpleRefPeerManager<
'a, 'b, 'c, 'd, 'e, 'f, 'logger, 'h, 'i, 'j, 'graph, 'k, 'mr, SD, M, T, F, C, L
> = PeerManager<
SD,
&'j SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'graph, 'logger, 'i, 'mr, M, T, F, L>,
&'f P2PGossipSync<&'graph NetworkGraph<&'logger L>, C, &'logger L>,
&'h SimpleRefOnionMessenger<'a, 'b, 'c, 'd, 'e, 'graph, 'logger, 'i, 'j, 'k, M, T, F, L>,
&'logger L,
IgnoringMessageHandler,
&'c KeysManager
>;
/// A generic trait which is implemented for all [`PeerManager`]s. This makes bounding functions or
/// structs on any [`PeerManager`] much simpler as only this trait is needed as a bound, rather
/// than the full set of bounds on [`PeerManager`] itself.
///
/// This is not exported to bindings users as general cover traits aren't useful in other
/// languages.
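///
/// For example, a helper generic over any [`PeerManager`] can be written roughly as follows
/// (the helper itself is illustrative):
///
/// ```ignore
/// fn drive_message_processing<PM: APeerManager>(peer_manager: &PM) {
///     // as_ref() exposes the underlying PeerManager regardless of its exact type parameters.
///     peer_manager.as_ref().process_events();
/// }
/// ```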
#[allow(missing_docs)]
pub trait APeerManager {
type Descriptor: SocketDescriptor;
type CMT: ChannelMessageHandler + ?Sized;
type CM: Deref<Target=Self::CMT>;
type RMT: RoutingMessageHandler + ?Sized;
type RM: Deref<Target=Self::RMT>;
type OMT: OnionMessageHandler + ?Sized;
type OM: Deref<Target=Self::OMT>;
type LT: Logger + ?Sized;
type L: Deref<Target=Self::LT>;
type CMHT: CustomMessageHandler + ?Sized;
type CMH: Deref<Target=Self::CMHT>;
type NST: NodeSigner + ?Sized;
type NS: Deref<Target=Self::NST>;
/// Gets a reference to the underlying [`PeerManager`].
fn as_ref(&self) -> &PeerManager<Self::Descriptor, Self::CM, Self::RM, Self::OM, Self::L, Self::CMH, Self::NS>;
}
impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref>
APeerManager for PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler,
NS::Target: NodeSigner,
{
type Descriptor = Descriptor;
type CMT = <CM as Deref>::Target;
type CM = CM;
type RMT = <RM as Deref>::Target;
type RM = RM;
type OMT = <OM as Deref>::Target;
type OM = OM;
type LT = <L as Deref>::Target;
type L = L;
type CMHT = <CMH as Deref>::Target;
type CMH = CMH;
type NST = <NS as Deref>::Target;
type NS = NS;
fn as_ref(&self) -> &PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> { self }
}
/// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
/// socket events into messages which it passes on to its [`MessageHandler`].
///
/// Locks are taken internally, so you must never assume that reentrancy from a
/// [`SocketDescriptor`] call back into [`PeerManager`] methods will not deadlock.
///
/// Calls to [`read_event`] will decode relevant messages and pass them to the
/// [`ChannelMessageHandler`], likely doing message processing in-line. Thus, the primary form of
/// parallelism in Rust-Lightning is in calls to [`read_event`]. Note, however, that calls to any
/// [`PeerManager`] functions related to the same connection must occur only in serial, making new
/// calls only after previous ones have returned.
///
/// Rather than using a plain [`PeerManager`], it is preferable to use either a [`SimpleArcPeerManager`]
/// or a [`SimpleRefPeerManager`], for conciseness. See their documentation for more details, but
/// essentially you should default to using a [`SimpleRefPeerManager`], and use a
/// [`SimpleArcPeerManager`] when you require a `PeerManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
/// [`read_event`]: PeerManager::read_event
pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler,
NS::Target: NodeSigner {
message_handler: MessageHandler<CM, RM, OM, CMH>,
/// Connection state for each connected peer - we have an outer read-write lock which is taken
/// as read while we're doing processing for a peer and taken write when a peer is being added
/// or removed.
///
/// The inner Peer lock is held for sending and receiving bytes, but note that we do *not* hold
/// it while we're processing a message. This is fine as [`PeerManager::read_event`] requires
/// that there be no parallel calls for a given peer, so mutual exclusion of messages handed to
/// the `MessageHandler`s for a given peer is already guaranteed.
peers: FairRwLock<HashMap<Descriptor, Mutex<Peer>>>,
/// Only add to this set when noise completes.
/// Locked *after* peers. When an item is removed, it must be removed with the `peers` write
/// lock held. Entries may be added with only the `peers` read lock held (though the
/// `Descriptor` value must already exist in `peers`).
node_id_to_descriptor: Mutex<HashMap<PublicKey, Descriptor>>,
/// We can only have one thread processing events at once, but if a second call to
/// `process_events` happens while a first call is in progress, one of the two calls needs to
/// start from the top to ensure any new messages are also handled.
///
/// Because the event handler calls into user code which may block, we don't want to block a
/// second thread waiting for another thread to handle events which is then blocked on user
/// code, so we store an atomic counter here:
/// * 0 indicates no event processor is running
/// * 1 indicates an event processor is running
/// * > 1 indicates an event processor is running but needs to start again from the top once
/// it finishes as another thread tried to start processing events but returned early.
event_processing_state: AtomicI32,
/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
/// value increases strictly since we don't assume access to a time source.
last_node_announcement_serial: AtomicU32,
ephemeral_key_midstate: Sha256Engine,
peer_counter: AtomicCounter,
gossip_processing_backlogged: AtomicBool,
gossip_processing_backlog_lifted: AtomicBool,
node_signer: NS,
logger: L,
secp_ctx: Secp256k1<secp256k1::SignOnly>
}
enum MessageHandlingError {
PeerHandleError(PeerHandleError),
LightningError(LightningError),
}
impl From<PeerHandleError> for MessageHandlingError {
fn from(error: PeerHandleError) -> Self {
MessageHandlingError::PeerHandleError(error)
}
}
impl From<LightningError> for MessageHandlingError {
fn from(error: LightningError) -> Self {
MessageHandlingError::LightningError(error)
}
}
macro_rules! encode_msg {
($msg: expr) => {{
let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE));
wire::write($msg, &mut buffer).unwrap();
buffer.0
}}
}
impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref, NS: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, OM, L, IgnoringMessageHandler, NS> where
CM::Target: ChannelMessageHandler,
OM::Target: OnionMessageHandler,
L::Target: Logger,
NS::Target: NodeSigner {
/// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and
/// `OnionMessageHandler`. No routing message handler is used and network graph messages are
/// ignored.
///
/// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// `current_time` is used as an always-increasing counter that survives across restarts and is
/// incremented irregularly internally. In general it is best to simply use the current UNIX
/// timestamp, however if it is not available a persistent counter that increases once per
/// minute should suffice.
///
/// This is not exported to bindings users as we can't export a PeerManager with a dummy route handler
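///
/// A rough construction sketch; `channel_manager`, `onion_messenger`, `keys_manager`, `logger`,
/// and `ephemeral_bytes` are assumed to be built elsewhere in your application:
///
/// ```ignore
/// let peer_manager = PeerManager::new_channel_only(
///     &channel_manager, &onion_messenger, current_unix_timestamp, &ephemeral_bytes,
///     &logger, &keys_manager,
/// );
/// ```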
pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
Self::new(MessageHandler {
chan_handler: channel_message_handler,
route_handler: IgnoringMessageHandler{},
onion_message_handler,
custom_message_handler: IgnoringMessageHandler{},
}, current_time, ephemeral_random_data, logger, node_signer)
}
}
impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref, NS: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, IgnoringMessageHandler, L, IgnoringMessageHandler, NS> where
RM::Target: RoutingMessageHandler,
L::Target: Logger,
NS::Target: NodeSigner {
/// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message
/// handler or onion message handler is used and onion and channel messages will be ignored (or
/// generate error messages). Note that some other lightning implementations time-out connections
/// after some time if no channel is built with the peer.
///
/// `current_time` is used as an always-increasing counter that survives across restarts and is
/// incremented irregularly internally. In general it is best to simply use the current UNIX
/// timestamp, however if it is not available a persistent counter that increases once per
/// minute should suffice.
///
/// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// This is not exported to bindings users as we can't export a PeerManager with a dummy channel handler
pub fn new_routing_only(routing_message_handler: RM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
Self::new(MessageHandler {
chan_handler: ErroringMessageHandler::new(),
route_handler: routing_message_handler,
onion_message_handler: IgnoringMessageHandler{},
custom_message_handler: IgnoringMessageHandler{},
}, current_time, ephemeral_random_data, logger, node_signer)
}
}
/// A simple wrapper that optionally prints ` from <pubkey>` for an optional pubkey.
/// This works around `format!()` taking a reference to each argument, preventing
/// `if let Some(node_id) = peer.their_node_id { format!(.., node_id) } else { .. }` from compiling
/// due to lifetime errors.
struct OptionalFromDebugger<'a>(&'a Option<(PublicKey, NodeId)>);
impl core::fmt::Display for OptionalFromDebugger<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
if let Some((node_id, _)) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) }
}
}
/// A function used to filter out local or private addresses
/// <https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xhtml>
/// <https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml>
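///
/// For example (a sketch of the intended behaviour):
///
/// ```ignore
/// // RFC 1918 private address - filtered out.
/// assert_eq!(filter_addresses(Some(SocketAddress::TcpIpV4 { addr: [192, 168, 1, 2], port: 9735 })), None);
/// // A publicly routable address - passed through unchanged.
/// let public = Some(SocketAddress::TcpIpV4 { addr: [1, 2, 3, 4], port: 9735 });
/// assert_eq!(filter_addresses(public.clone()), public);
/// ```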
fn filter_addresses(ip_address: Option<SocketAddress>) -> Option<SocketAddress> {
match ip_address{
// For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
Some(SocketAddress::TcpIpV4{addr: [10, _, _, _], port: _}) => None,
// For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
Some(SocketAddress::TcpIpV4{addr: [0, _, _, _], port: _}) => None,
// For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
Some(SocketAddress::TcpIpV4{addr: [100, 64..=127, _, _], port: _}) => None,
// For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8)
Some(SocketAddress::TcpIpV4{addr: [127, _, _, _], port: _}) => None,
// For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16)
Some(SocketAddress::TcpIpV4{addr: [169, 254, _, _], port: _}) => None,
// For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
Some(SocketAddress::TcpIpV4{addr: [172, 16..=31, _, _], port: _}) => None,
// For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
Some(SocketAddress::TcpIpV4{addr: [192, 168, _, _], port: _}) => None,
// For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24)
Some(SocketAddress::TcpIpV4{addr: [192, 88, 99, _], port: _}) => None,
// For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
Some(SocketAddress::TcpIpV6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
// For remaining addresses
Some(SocketAddress::TcpIpV6{addr: _, port: _}) => None,
Some(..) => ip_address,
None => None,
}
}
impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref, NS: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH, NS> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler,
NS::Target: NodeSigner
{
/// Constructs a new `PeerManager` with the given message handlers.
///
/// `ephemeral_random_data` is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// `current_time` is used as an always-increasing counter that survives across restarts and is
/// incremented irregularly internally. In general it is best to simply use the current UNIX
/// timestamp, however if it is not available a persistent counter that increases once per
/// minute should suffice.
pub fn new(message_handler: MessageHandler<CM, RM, OM, CMH>, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self {
let mut ephemeral_key_midstate = Sha256::engine();
ephemeral_key_midstate.input(ephemeral_random_data);