Merge branch 'rxrpc-fixes'
David Howells says:

====================
rxrpc: Fixes for I/O thread conversion/SACK table expansion

Here are some fixes for AF_RXRPC:

 (1) Fix missing unlock in rxrpc's sendmsg.

 (2) Fix (lack of) propagation of security settings to rxrpc_call.

 (3) Fix NULL ptr deref in rxrpc_unuse_local().

 (4) Fix problem with kthread_run() not invoking the I/O thread function
     if the kthread gets stopped first.  Possibly this should actually be
     fixed in the kthread code.

 (5) Fix locking problem as putting a peer (which may be done from RCU)
     may now invoke kthread_stop().

 (6) Fix switched parameters in a couple of trace calls.

 (7) Fix I/O thread's checking for kthread stop to make sure it completes
     all outstanding work before returning so that calls are cleaned up.

 (8) Fix an uninitialised var in the new rxperf test server.

 (9) Fix the return value of rxrpc_new_incoming_call() so that the checks
     on it work correctly.

The patches fix at least one syzbot bug[1] and probably some others that
don't have reproducers[2][3][4].  I think it also fixes another[5], but
that showed another failure during testing that was different to the
original.

There's also an outstanding bug in rxrpc_put_peer()[6] that is fixed by a
combination of several patches in my rxrpc-next branch, but I haven't
included that here.
====================

Tested-by: Marc Dionne <marc.dionne@auristor.com>
Tested-by: kafs-testing+fedora36_64checkkafs-build-164@auristor.com
Signed-off-by: David S. Miller <davem@davemloft.net>
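Points (4) and (7) above both concern the lifecycle of the new I/O thread. The snippet below is a minimal, self-contained sketch of the two patterns involved, not the rxrpc code itself; the names my_ctx, my_io_thread and my_start are purely illustrative. The creator waits on a completion so the thread is known to be running before it may be stopped, and the thread samples kthread_should_stop() before re-checking its queue so that pending work is drained before it exits:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/skbuff.h>

    struct my_ctx {
            struct completion       thread_ready;   /* signalled once the thread runs */
            struct sk_buff_head     rx_queue;       /* work fed to the thread */
            struct task_struct      *io_thread;
    };

    static int my_io_thread(void *data)
    {
            struct my_ctx *ctx = data;
            struct sk_buff *skb;
            bool should_stop;

            /* Let the creator know the thread function is actually running,
             * so it cannot be stopped before it has started (point (4)). */
            complete(&ctx->thread_ready);

            for (;;) {
                    /* Drain whatever is currently queued. */
                    while ((skb = skb_dequeue(&ctx->rx_queue)))
                            kfree_skb(skb); /* stand-in for real packet processing */

                    set_current_state(TASK_INTERRUPTIBLE);

                    /* Sample the stop flag *before* re-checking the queue so
                     * that work queued just before kthread_stop() is still
                     * processed on the next pass (point (7)). */
                    should_stop = kthread_should_stop();
                    if (!skb_queue_empty(&ctx->rx_queue)) {
                            __set_current_state(TASK_RUNNING);
                            continue;
                    }
                    if (should_stop)
                            break;
                    schedule();
            }

            __set_current_state(TASK_RUNNING);
            return 0;
    }

    static int my_start(struct my_ctx *ctx)
    {
            struct task_struct *t;

            init_completion(&ctx->thread_ready);
            skb_queue_head_init(&ctx->rx_queue);

            t = kthread_run(my_io_thread, ctx, "my-io");
            if (IS_ERR(t))
                    return PTR_ERR(t);

            /* Don't publish the thread pointer until the thread is known to
             * be running and thus safe to pass to kthread_stop(). */
            wait_for_completion(&ctx->thread_ready);
            ctx->io_thread = t;
            return 0;
    }

The corresponding changes to rxrpc_io_thread() and rxrpc_open_socket() appear in the hunks below.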
commit 98dbec0a0a
@@ -471,7 +471,7 @@ TRACE_EVENT(rxrpc_peer,
 	    TP_STRUCT__entry(
 		    __field(unsigned int,	peer		)
 		    __field(int,		ref		)
-		    __field(int,		why		)
+		    __field(enum rxrpc_peer_trace, why	)
 			     ),
 
 	    TP_fast_assign(
@@ -287,6 +287,7 @@ struct rxrpc_local {
 	struct hlist_node	link;
 	struct socket		*socket;	/* my UDP socket */
 	struct task_struct	*io_thread;
+	struct completion	io_thread_ready; /* Indication that the I/O thread started */
 	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
 	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
 	struct sk_buff_head	rx_queue;	/* Received packets */
@@ -811,9 +812,9 @@ extern struct workqueue_struct *rxrpc_workqueue;
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
-bool rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
-			     struct rxrpc_connection *, struct sockaddr_rxrpc *,
-			     struct sk_buff *);
+int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
+			    struct rxrpc_connection *, struct sockaddr_rxrpc *,
+			    struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
 int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
 
@@ -1072,7 +1073,6 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
 void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
-void rxrpc_put_peer_locked(struct rxrpc_peer *, enum rxrpc_peer_trace);
 
 /*
  * proc.c
@@ -326,11 +326,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * If we want to report an error, we mark the skb with the packet type and
  * abort code and return false.
  */
-bool rxrpc_new_incoming_call(struct rxrpc_local *local,
-			     struct rxrpc_peer *peer,
-			     struct rxrpc_connection *conn,
-			     struct sockaddr_rxrpc *peer_srx,
-			     struct sk_buff *skb)
+int rxrpc_new_incoming_call(struct rxrpc_local *local,
+			    struct rxrpc_peer *peer,
+			    struct rxrpc_connection *conn,
+			    struct sockaddr_rxrpc *peer_srx,
+			    struct sk_buff *skb)
 {
 	const struct rxrpc_security *sec = NULL;
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -342,7 +342,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
 	/* Don't set up a call for anything other than the first DATA packet. */
 	if (sp->hdr.seq != 1 ||
 	    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
-		return true; /* Just discard */
+		return 0; /* Just discard */
 
 	rcu_read_lock();
 
@@ -413,7 +413,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
 	_leave(" = %p{%d}", call, call->debug_id);
 	rxrpc_input_call_event(call, skb);
 	rxrpc_put_call(call, rxrpc_call_put_input);
-	return true;
+	return 0;
 
 unsupported_service:
 	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -425,10 +425,10 @@ no_call:
 reject:
 	rcu_read_unlock();
 	_leave(" = f [%u]", skb->mark);
-	return false;
+	return -EPROTO;
 discard:
 	rcu_read_unlock();
-	return true;
+	return 0;
 }
 
 /*
@@ -217,6 +217,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
 	call->tx_total_len = p->tx_total_len;
 	call->key = key_get(cp->key);
 	call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
+	call->security_level = cp->security_level;
 	if (p->kernel)
 		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
 	if (cp->upgrade)
@@ -551,8 +551,6 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
 	call->cid	= conn->proto.cid | channel;
 	call->call_id	= call_id;
-	call->security	= conn->security;
-	call->security_ix = conn->security_ix;
 	call->dest_srx.srx_service = conn->service_id;
 
 	trace_rxrpc_connect_call(call);
@@ -292,7 +292,7 @@ protocol_error:
 	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 reject_packet:
 	rxrpc_reject_packet(local, skb);
-	return ret;
+	return 0;
 }
 
 /*
@@ -384,7 +384,7 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
 		if (rxrpc_to_client(sp))
 			goto bad_message;
 		if (rxrpc_new_incoming_call(conn->local, conn->peer, conn,
-					    peer_srx, skb))
+					    peer_srx, skb) == 0)
 			return 0;
 		goto reject_packet;
 	}
@@ -425,6 +425,9 @@ int rxrpc_io_thread(void *data)
 	struct rxrpc_local *local = data;
 	struct rxrpc_call *call;
 	struct sk_buff *skb;
+	bool should_stop;
+
+	complete(&local->io_thread_ready);
 
 	skb_queue_head_init(&rx_queue);
 
@@ -476,13 +479,14 @@ int rxrpc_io_thread(void *data)
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
+		should_stop = kthread_should_stop();
 		if (!skb_queue_empty(&local->rx_queue) ||
 		    !list_empty(&local->call_attend_q)) {
 			__set_current_state(TASK_RUNNING);
 			continue;
 		}
 
-		if (kthread_should_stop())
+		if (should_stop)
 			break;
 		schedule();
 	}
@@ -97,6 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 		local->rxnet = rxnet;
 		INIT_HLIST_NODE(&local->link);
 		init_rwsem(&local->defrag_sem);
+		init_completion(&local->io_thread_ready);
 		skb_queue_head_init(&local->rx_queue);
 		INIT_LIST_HEAD(&local->call_attend_q);
 		local->client_bundles = RB_ROOT;
@@ -189,6 +190,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 		goto error_sock;
 	}
 
+	wait_for_completion(&local->io_thread_ready);
 	local->io_thread = io_thread;
 	_leave(" = 0");
 	return 0;
@@ -357,10 +359,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local,
  */
 void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
 {
-	unsigned int debug_id = local->debug_id;
+	unsigned int debug_id;
 	int r, u;
 
 	if (local) {
+		debug_id = local->debug_id;
 		r = refcount_read(&local->ref);
 		u = atomic_dec_return(&local->active_users);
 		trace_rxrpc_local(debug_id, why, r, u);
@@ -235,6 +235,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 	struct rxrpc_peer *peer;
 	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
 	time64_t keepalive_at;
+	bool use;
 	int slot;
 
 	spin_lock(&rxnet->peer_hash_lock);
@@ -247,9 +248,10 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 		if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
 			continue;
 
-		if (__rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive)) {
-			spin_unlock(&rxnet->peer_hash_lock);
+		use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
+		spin_unlock(&rxnet->peer_hash_lock);
+
+		if (use) {
 			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
 			slot = keepalive_at - base;
 			_debug("%02x peer %u t=%d {%pISp}",
@@ -270,9 +272,11 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 			spin_lock(&rxnet->peer_hash_lock);
 			list_add_tail(&peer->keepalive_link,
 				      &rxnet->peer_keepalive[slot & mask]);
+			spin_unlock(&rxnet->peer_hash_lock);
 			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
 		}
-		rxrpc_put_peer_locked(peer, rxrpc_peer_put_keepalive);
+		rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
+		spin_lock(&rxnet->peer_hash_lock);
 	}
 
 	spin_unlock(&rxnet->peer_hash_lock);
@@ -226,7 +226,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
 		rxrpc_peer_init_rtt(peer);
 
 		peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
-		trace_rxrpc_peer(peer->debug_id, why, 1);
+		trace_rxrpc_peer(peer->debug_id, 1, why);
 	}
 
 	_leave(" = %p", peer);
@@ -382,7 +382,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace
 	int r;
 
 	__refcount_inc(&peer->ref, &r);
-	trace_rxrpc_peer(peer->debug_id, why, r + 1);
+	trace_rxrpc_peer(peer->debug_id, r + 1, why);
 	return peer;
 }
 
@@ -438,25 +438,6 @@ void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
 	}
 }
 
-/*
- * Drop a ref on a peer record where the caller already holds the
- * peer_hash_lock.
- */
-void rxrpc_put_peer_locked(struct rxrpc_peer *peer, enum rxrpc_peer_trace why)
-{
-	unsigned int debug_id = peer->debug_id;
-	bool dead;
-	int r;
-
-	dead = __refcount_dec_and_test(&peer->ref, &r);
-	trace_rxrpc_peer(debug_id, r - 1, why);
-	if (dead) {
-		hash_del_rcu(&peer->hash_link);
-		list_del_init(&peer->keepalive_link);
-		rxrpc_free_peer(peer);
-	}
-}
-
 /*
  * Make sure all peer records have been discarded.
  */
@@ -275,7 +275,7 @@ static void rxperf_deliver_to_call(struct work_struct *work)
 	struct rxperf_call *call = container_of(work, struct rxperf_call, work);
 	enum rxperf_call_state state;
 	u32 abort_code, remote_abort = 0;
-	int ret;
+	int ret = 0;
 
 	if (call->state == RXPERF_CALL_COMPLETE)
 		return;
@@ -67,13 +67,13 @@ const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
  */
 int rxrpc_init_client_call_security(struct rxrpc_call *call)
 {
-	const struct rxrpc_security *sec;
+	const struct rxrpc_security *sec = &rxrpc_no_security;
 	struct rxrpc_key_token *token;
 	struct key *key = call->key;
 	int ret;
 
 	if (!key)
-		return 0;
+		goto found;
 
 	ret = key_validate(key);
 	if (ret < 0)
@@ -88,7 +88,7 @@ int rxrpc_init_client_call_security(struct rxrpc_call *call)
 
 found:
 	call->security = sec;
-	_leave(" = 0");
+	call->security_ix = sec->security_index;
 	return 0;
 }
 
@@ -625,7 +625,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 		if (call->tx_total_len != -1 ||
 		    call->tx_pending ||
 		    call->tx_top != 0)
-			goto error_put;
+			goto out_put_unlock;
 		call->tx_total_len = p.call.tx_total_len;
 	}
 }