Merge tag 'rxrpc-next-20171111' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Fixes

Here are some patches that fix some things in AF_RXRPC:

 (1) Prevent notifications from being passed to a kernel service for a call
     that it has ended.

 (2) Fix a null pointer dereference that occurs under some circumstances when
     an ACK is generated.

 (3) Fix a number of things to do with call expiration.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
davem330 committed Nov 14, 2017
2 parents 442866f + dcbefc3 commit 166c881
Showing 7 changed files with 36 additions and 7 deletions.
16 changes: 16 additions & 0 deletions net/rxrpc/af_rxrpc.c
@@ -322,6 +322,14 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 }
 EXPORT_SYMBOL(rxrpc_kernel_begin_call);
 
+/*
+ * Dummy function used to stop the notifier talking to recvmsg().
+ */
+static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
+				  unsigned long call_user_ID)
+{
+}
+
 /**
  * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
  * @sock: The socket the call is on
@@ -336,6 +344,14 @@ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
 
 	mutex_lock(&call->user_mutex);
 	rxrpc_release_call(rxrpc_sk(sock->sk), call);
+
+	/* Make sure we're not going to call back into a kernel service */
+	if (call->notify_rx) {
+		spin_lock_bh(&call->notify_lock);
+		call->notify_rx = rxrpc_dummy_notify_rx;
+		spin_unlock_bh(&call->notify_lock);
+	}
+
 	mutex_unlock(&call->user_mutex);
 	rxrpc_put_call(call, rxrpc_call_put_kernel);
 }
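
For context on the hunks above: the notifier pointer is only invoked and replaced while notify_lock is held (see the net/rxrpc/recvmsg.c hunk further down), so once the dummy handler is installed no further notifications can reach the kernel service. Below is a minimal user-space sketch of that callback-quiescing idea; the types, names and pthread spinlocks are invented for illustration and are not the rxrpc code itself.

#include <pthread.h>
#include <stdio.h>

/* Invented stand-ins for rxrpc_call and its notifier, for illustration only. */
typedef void (*notify_fn_t)(void *ctx, unsigned long user_id);

struct fake_call {
	pthread_spinlock_t notify_lock;	/* serialises invoke vs. replace */
	notify_fn_t notify_rx;		/* callback into the "service" */
};

static void real_notify(void *ctx, unsigned long user_id)
{
	printf("notification for call %lu\n", user_id);
}

/* Plays the role of rxrpc_dummy_notify_rx(): absorb late notifications. */
static void dummy_notify(void *ctx, unsigned long user_id)
{
}

/* Delivery side (cf. rxrpc_notify_socket): invoke the notifier under the lock. */
static void deliver(struct fake_call *call, unsigned long user_id)
{
	pthread_spin_lock(&call->notify_lock);
	call->notify_rx(NULL, user_id);
	pthread_spin_unlock(&call->notify_lock);
}

/* Teardown side (cf. rxrpc_kernel_end_call): once the dummy is installed,
 * any concurrent delivery has either completed or will hit the dummy, so
 * the real notification target can safely go away. */
static void quiesce(struct fake_call *call)
{
	pthread_spin_lock(&call->notify_lock);
	call->notify_rx = dummy_notify;
	pthread_spin_unlock(&call->notify_lock);
}

int main(void)
{
	struct fake_call call;

	pthread_spin_init(&call.notify_lock, PTHREAD_PROCESS_PRIVATE);
	call.notify_rx = real_notify;
	deliver(&call, 1);	/* reaches real_notify() */
	quiesce(&call);
	deliver(&call, 2);	/* silently absorbed by dummy_notify() */
	pthread_spin_destroy(&call.notify_lock);
	return 0;
}

The kernel code uses the _bh spinlock variants, presumably because notifications can arrive from softirq context; the sketch only demonstrates the ordering guarantee that makes the swap safe.
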
1 change: 1 addition & 0 deletions net/rxrpc/ar-internal.h
@@ -525,6 +525,7 @@ struct rxrpc_call {
 	unsigned long		flags;
 	unsigned long		events;
 	spinlock_t		lock;
+	spinlock_t		notify_lock;	/* Kernel notification lock */
 	rwlock_t		state_lock;	/* lock for state transition */
 	u32			abort_code;	/* Local/remote abort code */
 	int			error;		/* Local error incurred */
2 changes: 1 addition & 1 deletion net/rxrpc/call_event.c
@@ -386,7 +386,7 @@ void rxrpc_process_call(struct work_struct *work)
 
 	now = ktime_get_real();
 	if (ktime_before(call->expire_at, now)) {
-		rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
+		rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		goto recheck_state;
 	}
1 change: 1 addition & 0 deletions net/rxrpc/call_object.c
@@ -124,6 +124,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 	INIT_LIST_HEAD(&call->sock_link);
 	init_waitqueue_head(&call->waitq);
 	spin_lock_init(&call->lock);
+	spin_lock_init(&call->notify_lock);
 	rwlock_init(&call->state_lock);
 	atomic_set(&call->usage, 1);
 	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
2 changes: 0 additions & 2 deletions net/rxrpc/input.c
@@ -298,8 +298,6 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 
 	write_unlock(&call->state_lock);
 	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
-		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
-				  rxrpc_propose_ack_client_tx_end);
 		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
 	} else {
 		trace_rxrpc_transmit(call, rxrpc_transmit_end);
19 changes: 15 additions & 4 deletions net/rxrpc/output.c
@@ -35,7 +35,8 @@ struct rxrpc_abort_buffer {
 /*
  * Fill out an ACK packet.
  */
-static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
+static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+				 struct rxrpc_call *call,
 				 struct rxrpc_ack_buffer *pkt,
 				 rxrpc_seq_t *_hard_ack,
 				 rxrpc_seq_t *_top,
@@ -77,8 +78,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
 		} while (before_eq(seq, top));
 	}
 
-	mtu = call->conn->params.peer->if_mtu;
-	mtu -= call->conn->params.peer->hdrsize;
+	mtu = conn->params.peer->if_mtu;
+	mtu -= conn->params.peer->hdrsize;
 	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
 	pkt->ackinfo.rxMTU	= htonl(rxrpc_rx_mtu);
 	pkt->ackinfo.maxMTU	= htonl(mtu);
@@ -148,7 +149,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
 		}
 		call->ackr_reason = 0;
 	}
-	n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason);
+	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);
 
 	spin_unlock_bh(&call->lock);
 
@@ -221,6 +222,16 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 	rxrpc_serial_t serial;
 	int ret;
 
+	/* Don't bother sending aborts for a client call once the server has
+	 * hard-ACK'd all of its request data. After that point, we're not
+	 * going to stop the operation proceeding, and whilst we might limit
+	 * the reply, it's not worth it if we can send a new call on the same
+	 * channel instead, thereby closing off this call.
+	 */
+	if (rxrpc_is_client_call(call) &&
+	    test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+		return 0;
+
 	spin_lock_bh(&call->lock);
 	if (call->conn)
 		conn = rxrpc_get_connection_maybe(call->conn);
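
For context on the net/rxrpc/output.c hunks above: rxrpc_fill_out_ack() now receives the connection from its caller instead of re-reading call->conn, which the fix suggests can be NULL by the time the ACK is filled in; the caller pins the connection first, much as rxrpc_send_abort_packet() is shown doing with rxrpc_get_connection_maybe(). Below is a minimal sketch of that "pin once, pass it down" shape, using simplified, invented structures rather than the kernel's.

/* Invented, simplified stand-ins for the rxrpc structures, for illustration. */
struct peer { unsigned int if_mtu, hdrsize; };
struct conn { struct peer *peer; };
struct call { struct conn *conn; /* may be cleared when the call is disconnected */ };

/* Racy shape: the helper re-reads call->conn, which may have become NULL
 * between the caller's check and this dereference. */
static unsigned int payload_mtu_racy(const struct call *call)
{
	return call->conn->peer->if_mtu - call->conn->peer->hdrsize;
}

/* Fixed shape: the caller pins the connection once and hands it down, so
 * the helper never touches call->conn at all. */
static unsigned int payload_mtu(const struct conn *conn)
{
	return conn->peer->if_mtu - conn->peer->hdrsize;
}

static unsigned int send_ack(struct call *call)
{
	struct conn *conn = call->conn;	/* read once; caller holds a reference */

	if (!conn)
		return 0;		/* call already disconnected */
	return payload_mtu(conn);
}

int main(void)
{
	struct peer p = { .if_mtu = 1444, .hdrsize = 28 };
	struct conn c = { .peer = &p };
	struct call call = { .conn = &c };

	(void)payload_mtu_racy;		/* kept only to contrast with payload_mtu() */
	return send_ack(&call) == 1416 ? 0 : 1;
}

The same shape appears in the rxrpc_send_ack_packet() hunk, where the already-obtained conn is simply added to the argument list.
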
2 changes: 2 additions & 0 deletions net/rxrpc/recvmsg.c
@@ -40,7 +40,9 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
 	sk = &rx->sk;
 	if (rx && sk->sk_state < RXRPC_CLOSE) {
 		if (call->notify_rx) {
+			spin_lock_bh(&call->notify_lock);
 			call->notify_rx(sk, call, call->user_call_ID);
+			spin_unlock_bh(&call->notify_lock);
 		} else {
 			write_lock_bh(&rx->recvmsg_lock);
 			if (list_empty(&call->recvmsg_link)) {
