// SPDX-License-Identifier: GPL-2.0-or-later
/* rxrpc network namespace handling.
 *
 * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/proc_fs.h>
#include "ar-internal.h"

unsigned int rxrpc_net_id;

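/* Kick the client connection reaper when its expiry timer fires.  The work
 * item is only queued while the namespace is still live.
 */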
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
{
	struct rxrpc_net *rxnet =
		container_of(timer, struct rxrpc_net, client_conn_reap_timer);

	if (rxnet->live)
		rxrpc_queue_work(&rxnet->client_conn_reaper);
}

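/* Kick the service connection reaper when its expiry timer fires, again only
 * while the namespace is live.
 */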
static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
{
	struct rxrpc_net *rxnet =
		container_of(timer, struct rxrpc_net, service_conn_reap_timer);

	if (rxnet->live)
		rxrpc_queue_work(&rxnet->service_conn_reaper);
}

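/* Kick the peer keepalive worker when its timer fires so that keepalive
 * packets can be sent to peers that have gone quiet; only queued while the
 * namespace is live.
 */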
static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
{
	struct rxrpc_net *rxnet =
		container_of(timer, struct rxrpc_net, peer_keepalive_timer);

	if (rxnet->live)
		rxrpc_queue_work(&rxnet->peer_keepalive_work);
}

/*
 * Initialise a per-network namespace record.
 */
static __net_init int rxrpc_init_net(struct net *net)
{
	struct rxrpc_net *rxnet = rxrpc_net(net);
	int ret, i;

	rxnet->live = true;
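	/* Pick a random connection epoch for this namespace and flag it as
	 * randomly allocated with RXRPC_RANDOM_EPOCH.
	 */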
	get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
	rxnet->epoch |= RXRPC_RANDOM_EPOCH;

	INIT_LIST_HEAD(&rxnet->calls);
	rwlock_init(&rxnet->call_lock);
	atomic_set(&rxnet->nr_calls, 1);

	atomic_set(&rxnet->nr_conns, 1);
	INIT_LIST_HEAD(&rxnet->conn_proc_list);
	INIT_LIST_HEAD(&rxnet->service_conns);
	rwlock_init(&rxnet->conn_lock);
	INIT_WORK(&rxnet->service_conn_reaper,
		  rxrpc_service_connection_reaper);
	timer_setup(&rxnet->service_conn_reap_timer,
		    rxrpc_service_conn_reap_timeout, 0);

	/* Client connections are no longer limited in number per se; the
	 * number of client calls that can be allocated is capped instead.
	 * Idle client connections are normally reaped after two minutes, but
	 * once too many of them build up the reaper runs after only two
	 * seconds of idleness to bring the count back down.
	 */
	atomic_set(&rxnet->nr_client_conns, 0);
	rxnet->kill_all_client_conns = false;
	spin_lock_init(&rxnet->client_conn_cache_lock);
	spin_lock_init(&rxnet->client_conn_discard_lock);
	INIT_LIST_HEAD(&rxnet->idle_client_conns);
	INIT_WORK(&rxnet->client_conn_reaper,
		  rxrpc_discard_expired_client_conns);
	timer_setup(&rxnet->client_conn_reap_timer,
		    rxrpc_client_conn_reap_timeout, 0);

	INIT_LIST_HEAD(&rxnet->local_endpoints);
	mutex_init(&rxnet->local_mutex);

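	/* Peer records are kept in a hash table; the keepalive machinery
	 * tracks them in a ring of time buckets plus a list for newly added
	 * peers that have not yet been assigned a bucket.
	 */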
	hash_init(rxnet->peer_hash);
	spin_lock_init(&rxnet->peer_hash_lock);
	for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
		INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
	INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
	timer_setup(&rxnet->peer_keepalive_timer,
		    rxrpc_peer_keepalive_timeout, 0);
	INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
	rxnet->peer_keepalive_base = ktime_get_seconds();

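	/* Publish /proc/net/rxrpc/{calls,conns,peers} for this namespace. */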
	ret = -ENOMEM;
	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
	if (!rxnet->proc_net)
		goto err_proc;

	proc_create_net("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_ops,
			sizeof(struct seq_net_private));
	proc_create_net("conns", 0444, rxnet->proc_net,
			&rxrpc_connection_seq_ops,
			sizeof(struct seq_net_private));
	proc_create_net("peers", 0444, rxnet->proc_net,
			&rxrpc_peer_seq_ops,
			sizeof(struct seq_net_private));
	return 0;

err_proc:
	rxnet->live = false;
	return ret;
}

/*
 * Clean up a per-network namespace record.
 */
static __net_exit void rxrpc_exit_net(struct net *net)
{
	struct rxrpc_net *rxnet = rxrpc_net(net);

	rxnet->live = false;
	del_timer_sync(&rxnet->peer_keepalive_timer);
	cancel_work_sync(&rxnet->peer_keepalive_work);
	/* Remove the timer again as the worker may have restarted it. */
	del_timer_sync(&rxnet->peer_keepalive_timer);
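	/* Tear down in dependency order: calls, then connections, then peers,
	 * then local endpoints, before the /proc entries are removed.
	 */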
	rxrpc_destroy_all_calls(rxnet);
	rxrpc_destroy_all_connections(rxnet);
	rxrpc_destroy_all_peers(rxnet);
	rxrpc_destroy_all_locals(rxnet);
	proc_remove(rxnet->proc_net);
}

struct pernet_operations rxrpc_net_ops = {
	.init	= rxrpc_init_net,
	.exit	= rxrpc_exit_net,
	.id	= &rxrpc_net_id,
	.size	= sizeof(struct rxrpc_net),
};
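
/* A minimal usage sketch (the real registration lives in af_rxrpc.c's module
 * init path; whether register_pernet_device() or register_pernet_subsys() is
 * used depends on the kernel version):
 *
 *	err = register_pernet_device(&rxrpc_net_ops);
 *	if (err < 0)
 *		return err;
 *
 * Registration makes rxrpc_init_net()/rxrpc_exit_net() run for every network
 * namespace and has the core allocate .size bytes of per-namespace state,
 * which rxrpc_net() in ar-internal.h then retrieves via
 * net_generic(net, rxrpc_net_id).
 */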