@@ -764,6 +764,53 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);

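+/* Tear down a child socket that will never be accepted: disconnect it,
+ * orphan and destroy it, and release the request_sock reference.
+ */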
+static void inet_child_forget(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	sk->sk_prot->disconnect(child, O_NONBLOCK);
+
+	sock_orphan(child);
+
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
+	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+		BUG_ON(sk != req->rsk_listener);
+
+		/* Paranoid, to prevent race condition if
+		 * an inbound pkt destined for child is
+		 * blocked by sock lock in tcp_v4_rcv().
+		 * Also to satisfy an assertion in
+		 * tcp_v4_destroy_sock().
+		 */
+		tcp_sk(child)->fastopen_rsk = NULL;
+	}
+	inet_csk_destroy_sock(child);
+	reqsk_put(req);
+}
+
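+/* Hand an established child over to the listener's accept queue, or
+ * forget it if the listener is no longer in TCP_LISTEN state.
+ */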
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+	spin_lock(&queue->rskq_lock);
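+	/* Re-check the listener state under rskq_lock: once the listener
+	 * has left TCP_LISTEN, a child queued here could never be
+	 * accepted, so it has to be forgotten instead.
+	 */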
+	if (unlikely(sk->sk_state != TCP_LISTEN)) {
+		inet_child_forget(sk, req, child);
+	} else {
+		req->sk = child;
+		req->dl_next = NULL;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_head = req;
+		else
+			queue->rskq_accept_tail->dl_next = req;
+		queue->rskq_accept_tail = req;
+		sk_acceptq_added(sk);
+	}
+	spin_unlock(&queue->rskq_lock);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
 /*
  * This routine closes sockets which have been at least partially
  * opened, but not yet accepted.
@@ -790,31 +837,11 @@ void inet_csk_listen_stop(struct sock *sk)
 		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);

-		sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-		sock_orphan(child);
-
-		percpu_counter_inc(sk->sk_prot->orphan_count);
-
-		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-			BUG_ON(sk != req->rsk_listener);
-
-			/* Paranoid, to prevent race condition if
-			 * an inbound pkt destined for child is
-			 * blocked by sock lock in tcp_v4_rcv().
-			 * Also to satisfy an assertion in
-			 * tcp_v4_destroy_sock().
-			 */
-			tcp_sk(child)->fastopen_rsk = NULL;
-		}
-		inet_csk_destroy_sock(child);
-
+		inet_child_forget(sk, req, child);
 		bh_unlock_sock(child);
 		local_bh_enable();
 		sock_put(child);

-		reqsk_put(req);
 		cond_resched();
 	}
 	if (queue->fastopenq.rskq_rst_head) {
@@ -829,7 +856,7 @@ void inet_csk_listen_stop(struct sock *sk)
 			req = next;
 		}
 	}
-	WARN_ON(sk->sk_ack_backlog);
+	WARN_ON_ONCE(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
