@@ -611,43 +611,35 @@ where
             return Err(P2pError::ProtocolError(ProtocolError::DisconnectedHeaders));
         }

-        // The first header must be connected to a known block (it can be in
-        // the chainstate or requested_blocks).
+        // The first header must be connected to the chainstate.
         let first_header_prev_id = *headers
             .first()
             // This is OK because of the `headers.is_empty()` check above.
             .expect("Headers shouldn't be empty")
             .prev_block_id();

         // Note: we require a peer to send headers starting from a block that we already have
-        // in our chainstate or from one that we've already requested from the peer.
-        // I.e. peers shouldn't track what block headers they've sent us already and use
-        // the last header (best_sent_block_header) as a starting point for future HeaderList
-        // updates.
-        // This restriction is needed to prevent malicious peers from flooding the node with
-        // headers, potentially exhausting the node's memory.
-        // The downside of this is that the peer may have to send the same headers multiple times.
-        // So, to avoid extra traffic, an honest peer should't send header updates when the node
-        // is already downloading blocks. But still, the node shouldn't punish the peer for
-        // doing so, because it's possible for it to do so on accident, e.g. a "new tip" event
-        // may happen on the peer's side after it has sent us the last requested block but
-        // before we've asked it for more.
+        // in our chainstate. I.e. we don't allow:
+        // 1) Basing new headers on a previously sent header, because this would give a malicious
+        //    peer an opportunity to flood the node with headers, potentially exhausting its memory.
+        //    The downside of this restriction is that the peer may have to send the same headers
+        //    multiple times. So, to avoid extra traffic, an honest peer shouldn't send header
+        //    updates when the node is already downloading blocks. (But still, the node shouldn't
+        //    punish the peer for doing so, because it's possible for it to do so by accident,
+        //    e.g. a "new tip" event may happen on the peer's side after it has sent us the last
+        //    requested block but before we've asked it for more.)
+        // 2) Basing new headers on a block that we've requested from them but that has not yet
+        //    been sent. This is a rather useless optimization (provided that peers don't send
+        //    header updates when we're downloading blocks from them, as mentioned above) that
+        //    would only complicate the logic.

         let first_header_is_connected_to_chainstate = self
             .chainstate_handle
             .call(move |c| Ok(c.get_gen_block_index(&first_header_prev_id)?))
             .await?
             .is_some();

-        let first_header_is_connected_to_requested_blocks = first_header_prev_id
-            .classify(&self.chain_config)
-            .chain_block_id()
-            .and_then(|id| self.incoming.requested_blocks.get(&id))
-            .is_some();
-
-        if !(first_header_is_connected_to_chainstate
-            || first_header_is_connected_to_requested_blocks)
-        {
+        if !first_header_is_connected_to_chainstate {
             // Note: legacy nodes will send singular unconnected headers during block announcement,
             // so we have to handle this behavior here.
             if headers.len() == 1 {
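For readers skimming the hunk, here is a minimal, self-contained sketch of the policy the new comments describe: a header list is rejected as disconnected unless consecutive headers chain onto each other and the first header's parent is already known to the chainstate. The `BlockId`, `Header`, and `validate_header_list` names below are simplified stand-ins invented for illustration, not the project's actual types (the real code goes through the chainstate handle and returns `P2pError`, as shown above).

```rust
// Standalone illustration of the connectivity rule; types are hypothetical
// stand-ins, not the ones used in the real codebase.
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct BlockId(u64);

#[derive(Clone, Copy, Debug)]
struct Header {
    id: BlockId,
    prev_id: BlockId,
}

#[derive(Debug, PartialEq)]
enum HeaderListError {
    DisconnectedHeaders,
}

/// Accept a header list only if consecutive headers chain onto each other and
/// the first header's parent is already present in `known_blocks` (standing in
/// for the node's chainstate).
fn validate_header_list(
    headers: &[Header],
    known_blocks: &HashSet<BlockId>,
) -> Result<(), HeaderListError> {
    let Some(first) = headers.first() else {
        // An empty list carries no information; nothing to reject here.
        return Ok(());
    };

    // Each header must reference the previous one in the list.
    for pair in headers.windows(2) {
        if pair[1].prev_id != pair[0].id {
            return Err(HeaderListError::DisconnectedHeaders);
        }
    }

    // The first header must attach to a block we already have; basing it on a
    // header we merely saw earlier, or on a block we only requested, is not allowed.
    if !known_blocks.contains(&first.prev_id) {
        return Err(HeaderListError::DisconnectedHeaders);
    }

    Ok(())
}

fn main() {
    let known: HashSet<BlockId> = [BlockId(0), BlockId(1)].into_iter().collect();

    // First header builds on a known block and the rest chain onto it: accepted.
    let good = [
        Header { id: BlockId(2), prev_id: BlockId(1) },
        Header { id: BlockId(3), prev_id: BlockId(2) },
    ];
    assert!(validate_header_list(&good, &known).is_ok());

    // First header builds on an unknown block: rejected as disconnected.
    let bad = [Header { id: BlockId(5), prev_id: BlockId(4) }];
    assert_eq!(
        validate_header_list(&bad, &known),
        Err(HeaderListError::DisconnectedHeaders)
    );
}
```

The sketch deliberately omits the legacy-node special case visible in the hunk's last context lines, where a single unconnected header sent as a block announcement is given separate treatment in the `if headers.len() == 1` branch rather than being rejected outright.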