@@ -39,8 +39,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                                       unsigned int debug_id)
 {
         const void *here = __builtin_return_address(0);
-        struct rxrpc_call *call;
+        struct rxrpc_call *call, *xcall;
         struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+        struct rb_node *parent, **pp;
         int max, tmp;
         unsigned int size = RXRPC_BACKLOG_MAX;
         unsigned int head, tail, call_head, call_tail;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
         }
 
         /* Now it gets complicated, because calls get registered with the
-         * socket here, particularly if a user ID is preassigned by the user.
+         * socket here, with a user ID preassigned by the user.
          */
         call = rxrpc_alloc_call(rx, gfp, debug_id);
         if (!call)
@@ -107,34 +108,33 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                          here, (const void *)user_call_ID);
 
         write_lock(&rx->call_lock);
-        if (user_attach_call) {
-                struct rxrpc_call *xcall;
-                struct rb_node *parent, **pp;
-
-                /* Check the user ID isn't already in use */
-                pp = &rx->calls.rb_node;
-                parent = NULL;
-                while (*pp) {
-                        parent = *pp;
-                        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-                        if (user_call_ID < xcall->user_call_ID)
-                                pp = &(*pp)->rb_left;
-                        else if (user_call_ID > xcall->user_call_ID)
-                                pp = &(*pp)->rb_right;
-                        else
-                                goto id_in_use;
-                }
 
-                call->user_call_ID = user_call_ID;
-                call->notify_rx = notify_rx;
+        /* Check the user ID isn't already in use */
+        pp = &rx->calls.rb_node;
+        parent = NULL;
+        while (*pp) {
+                parent = *pp;
+                xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+                if (user_call_ID < xcall->user_call_ID)
+                        pp = &(*pp)->rb_left;
+                else if (user_call_ID > xcall->user_call_ID)
+                        pp = &(*pp)->rb_right;
+                else
+                        goto id_in_use;
+        }
+
+        call->user_call_ID = user_call_ID;
+        call->notify_rx = notify_rx;
+        if (user_attach_call) {
                 rxrpc_get_call(call, rxrpc_call_got_kernel);
                 user_attach_call(call, user_call_ID);
-                rxrpc_get_call(call, rxrpc_call_got_userid);
-                rb_link_node(&call->sock_node, parent, pp);
-                rb_insert_color(&call->sock_node, &rx->calls);
-                set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
         }
 
+        rxrpc_get_call(call, rxrpc_call_got_userid);
+        rb_link_node(&call->sock_node, parent, pp);
+        rb_insert_color(&call->sock_node, &rx->calls);
+        set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+
         list_add(&call->sock_link, &rx->sock_calls);
 
         write_unlock(&rx->call_lock);
@@ -157,11 +157,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 }
 
 /*
- * Preallocate sufficient service connections, calls and peers to cover the
- * entire backlog of a socket. When a new call comes in, if we don't have
- * sufficient of each available, the call gets rejected as busy or ignored.
- *
- * The backlog is replenished when a connection is accepted or rejected.
+ * Allocate the preallocation buffers for incoming service calls. These must
+ * be charged manually.
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 {
@@ -174,13 +171,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
                 rx->backlog = b;
         }
 
-        if (rx->discard_new_call)
-                return 0;
-
-        while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
-                                          atomic_inc_return(&rxrpc_debug_id)) == 0)
-                ;
-
         return 0;
 }
 
@@ -333,6 +323,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
         rxrpc_see_call(call);
         call->conn = conn;
         call->security = conn->security;
+        call->security_ix = conn->security_ix;
         call->peer = rxrpc_get_peer(conn->params.peer);
         call->cong_cwnd = call->peer->cong_cwnd;
         return call;
@@ -402,8 +393,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
         if (rx->notify_new_call)
                 rx->notify_new_call(&rx->sk, call, call->user_call_ID);
-        else
-                sk_acceptq_added(&rx->sk);
 
         spin_lock(&conn->state_lock);
         switch (conn->state) {
@@ -415,12 +404,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
         case RXRPC_CONN_SERVICE:
                 write_lock(&call->state_lock);
-                if (call->state < RXRPC_CALL_COMPLETE) {
-                        if (rx->discard_new_call)
-                                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-                        else
-                                call->state = RXRPC_CALL_SERVER_ACCEPTING;
-                }
+                if (call->state < RXRPC_CALL_COMPLETE)
+                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                 write_unlock(&call->state_lock);
                 break;
 
@@ -440,9 +425,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
         rxrpc_send_ping(call, skb);
 
-        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
-                rxrpc_notify_socket(call);
-
         /* We have to discard the prealloc queue's ref here and rely on a
          * combination of the RCU read lock and refs held either by the socket
          * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
@@ -460,187 +442,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 }
 
 /*
- * handle acceptance of a call by userspace
- * - assign the user call ID to the call at the front of the queue
- * - called with the socket locked.
+ * Charge up socket with preallocated calls, attaching user call IDs.
  */
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
-                                     unsigned long user_call_ID,
-                                     rxrpc_notify_rx_t notify_rx)
-        __releases(&rx->sk.sk_lock.slock)
-        __acquires(call->user_mutex)
+int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
 {
-        struct rxrpc_call *call;
-        struct rb_node *parent, **pp;
-        int ret;
-
-        _enter(",%lx", user_call_ID);
-
-        ASSERT(!irqs_disabled());
-
-        write_lock(&rx->call_lock);
-
-        if (list_empty(&rx->to_be_accepted)) {
-                write_unlock(&rx->call_lock);
-                release_sock(&rx->sk);
-                kleave(" = -ENODATA [empty]");
-                return ERR_PTR(-ENODATA);
-        }
-
-        /* check the user ID isn't already in use */
-        pp = &rx->calls.rb_node;
-        parent = NULL;
-        while (*pp) {
-                parent = *pp;
-                call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-                if (user_call_ID < call->user_call_ID)
-                        pp = &(*pp)->rb_left;
-                else if (user_call_ID > call->user_call_ID)
-                        pp = &(*pp)->rb_right;
-                else
-                        goto id_in_use;
-        }
-
-        /* Dequeue the first call and check it's still valid.  We gain
-         * responsibility for the queue's reference.
-         */
-        call = list_entry(rx->to_be_accepted.next,
-                          struct rxrpc_call, accept_link);
-        write_unlock(&rx->call_lock);
-
-        /* We need to gain the mutex from the interrupt handler without
-         * upsetting lockdep, so we have to release it there and take it here.
-         * We are, however, still holding the socket lock, so other accepts
-         * must wait for us and no one can add the user ID behind our backs.
-         */
-        if (mutex_lock_interruptible(&call->user_mutex) < 0) {
-                release_sock(&rx->sk);
-                kleave(" = -ERESTARTSYS");
-                return ERR_PTR(-ERESTARTSYS);
-        }
-
-        write_lock(&rx->call_lock);
-        list_del_init(&call->accept_link);
-        sk_acceptq_removed(&rx->sk);
-        rxrpc_see_call(call);
-
-        /* Find the user ID insertion point. */
-        pp = &rx->calls.rb_node;
-        parent = NULL;
-        while (*pp) {
-                parent = *pp;
-                call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-                if (user_call_ID < call->user_call_ID)
-                        pp = &(*pp)->rb_left;
-                else if (user_call_ID > call->user_call_ID)
-                        pp = &(*pp)->rb_right;
-                else
-                        BUG();
-        }
-
-        write_lock_bh(&call->state_lock);
-        switch (call->state) {
-        case RXRPC_CALL_SERVER_ACCEPTING:
-                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-                break;
-        case RXRPC_CALL_COMPLETE:
-                ret = call->error;
-                goto out_release;
-        default:
-                BUG();
-        }
-
-        /* formalise the acceptance */
-        call->notify_rx = notify_rx;
-        call->user_call_ID = user_call_ID;
-        rxrpc_get_call(call, rxrpc_call_got_userid);
-        rb_link_node(&call->sock_node, parent, pp);
-        rb_insert_color(&call->sock_node, &rx->calls);
-        if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
-                BUG();
-
-        write_unlock_bh(&call->state_lock);
-        write_unlock(&rx->call_lock);
-        rxrpc_notify_socket(call);
-        rxrpc_service_prealloc(rx, GFP_KERNEL);
-        release_sock(&rx->sk);
-        _leave(" = %p{%d}", call, call->debug_id);
-        return call;
-
-out_release:
-        _debug("release %p", call);
-        write_unlock_bh(&call->state_lock);
-        write_unlock(&rx->call_lock);
-        rxrpc_release_call(rx, call);
-        rxrpc_put_call(call, rxrpc_call_put);
-        goto out;
-
-id_in_use:
-        ret = -EBADSLT;
-        write_unlock(&rx->call_lock);
-out:
-        rxrpc_service_prealloc(rx, GFP_KERNEL);
-        release_sock(&rx->sk);
-        _leave(" = %d", ret);
-        return ERR_PTR(ret);
-}
-
-/*
- * Handle rejection of a call by userspace
- * - reject the call at the front of the queue
- */
-int rxrpc_reject_call(struct rxrpc_sock *rx)
-{
-        struct rxrpc_call *call;
-        bool abort = false;
-        int ret;
-
-        _enter("");
-
-        ASSERT(!irqs_disabled());
-
-        write_lock(&rx->call_lock);
-
-        if (list_empty(&rx->to_be_accepted)) {
-                write_unlock(&rx->call_lock);
-                return -ENODATA;
-        }
-
-        /* Dequeue the first call and check it's still valid.  We gain
-         * responsibility for the queue's reference.
-         */
-        call = list_entry(rx->to_be_accepted.next,
-                          struct rxrpc_call, accept_link);
-        list_del_init(&call->accept_link);
-        sk_acceptq_removed(&rx->sk);
-        rxrpc_see_call(call);
+        struct rxrpc_backlog *b = rx->backlog;
 
-        write_lock_bh(&call->state_lock);
-        switch (call->state) {
-        case RXRPC_CALL_SERVER_ACCEPTING:
-                __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
-                abort = true;
-                fallthrough;
-        case RXRPC_CALL_COMPLETE:
-                ret = call->error;
-                goto out_discard;
-        default:
-                BUG();
-        }
+        if (rx->sk.sk_state == RXRPC_CLOSE)
+                return -ESHUTDOWN;
 
-out_discard:
-        write_unlock_bh(&call->state_lock);
-        write_unlock(&rx->call_lock);
-        if (abort) {
-                rxrpc_send_abort_packet(call);
-                rxrpc_release_call(rx, call);
-                rxrpc_put_call(call, rxrpc_call_put);
-        }
-        rxrpc_service_prealloc(rx, GFP_KERNEL);
-        _leave(" = %d", ret);
-        return ret;
+        return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
+                                          GFP_KERNEL,
+                                          atomic_inc_return(&rxrpc_debug_id));
 }
 
 /*
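With rxrpc_accept_call() and rxrpc_reject_call() gone, userspace no longer accepts or rejects calls one at a time after they arrive; it pre-charges the socket with accept slots, each already bound to a user call ID, and the kernel drops straight into RXRPC_CALL_SERVER_RECV_REQUEST for an incoming call. A minimal userspace sketch of that flow follows. It assumes the companion uAPI change in this series exposes the charge operation as an RXRPC_CHARGE_ACCEPT socket option at SOL_RXRPC; that option name, the availability of the constants in the headers used, and the IDs chosen are illustrative assumptions, not taken from this diff.

/* Illustrative sketch only: keep a service socket charged with
 * preallocated accept slots, each bound to a caller-chosen user call ID.
 * Assumes an RXRPC_CHARGE_ACCEPT sockopt (SOL_RXRPC) that reaches
 * rxrpc_user_charge_accept(); per the code above, a duplicate ID would
 * yield -EBADSLT and a closing socket -ESHUTDOWN.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

static int charge_accept_slot(int fd, unsigned long user_call_id)
{
        /* One successful call preallocates one incoming-call slot and
         * inserts user_call_id into the socket's call tree.
         */
        return setsockopt(fd, SOL_RXRPC, RXRPC_CHARGE_ACCEPT,
                          &user_call_id, sizeof(user_call_id));
}

int main(void)
{
        int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
        unsigned long id;

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* ... bind() the service address and set security options here ... */

        /* Charge a small backlog up front; recharge as calls complete. */
        for (id = 1; id <= 4; id++)
                if (charge_accept_slot(fd, id) < 0)
                        perror("RXRPC_CHARGE_ACCEPT");

        return 0;
}

Because the charge carries the user call ID up front, the first recvmsg() data for a new call already identifies it to the application, which is why the SERVER_ACCEPTING state and the to_be_accepted queue can be removed in this patch.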