/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <linux/win_minmax.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "protocol.h"

#if 0
#define CHECK_SLAB_OKAY(X)				     \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;

/*
 * Mark applied to socket buffers in skb->mark.  skb->priority is used
 * to pass supplementary information.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT (code in skb->priority) */
};
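
/*
 * Illustrative sketch (not part of the original header) of how the input
 * path can flag a packet for rejection using the marks above: the mark
 * selects the response type and, for an abort, skb->priority carries the
 * abort code.  The helper name is an assumption for demonstration only.
 */
static inline void rxrpc_example_mark_for_abort(struct sk_buff *skb,
						u32 abort_code)
{
	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
	skb->priority = abort_code;	/* Code to put in the ABORT packet */
}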

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
	u32			epoch;		/* Local epoch for detecting local-end reset */
	struct list_head	calls;		/* List of calls active in this namespace */
	rwlock_t		call_lock;	/* Lock for ->calls */
	atomic_t		nr_calls;	/* Count of allocated calls */

	atomic_t		nr_conns;	/* Count of allocated connections */
	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
	struct list_head	service_conns;	/* Service conns in this namespace */
	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct	service_conn_reaper;	/* Reaper for idle service conns */
	struct timer_list	service_conn_reap_timer; /* Timer to kick the reaper */

	bool			live;		/* Cleared when the namespace is being destroyed */

	bool			kill_all_client_conns;	/* Request discard of all idle client conns */
	atomic_t		nr_client_conns;	/* Count of client connections */
	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
	spinlock_t		client_conn_discard_lock; /* Prevent multiple discarders */
	struct list_head	idle_client_conns;	/* Idle client conns awaiting reaping */
	struct work_struct	client_conn_reaper;	/* Reaper for idle client conns */
	struct timer_list	client_conn_reap_timer;	/* Timer to kick the reaper */

	struct list_head	local_endpoints;
	struct mutex		local_mutex;	/* Lock for ->local_endpoints */

	DECLARE_HASHTABLE	(peer_hash, 10);
	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
	u8			peer_keepalive_cursor;	/* Wheel slot currently being serviced */
	time64_t		peer_keepalive_base;	/* Time corresponding to the cursor slot */
	struct list_head	peer_keepalive[32];	/* Peers bucketed by keepalive deadline */
	struct list_head	peer_keepalive_new;	/* Peers awaiting first bucketing */
	struct timer_list	peer_keepalive_timer;
	struct work_struct	peer_keepalive_work;
};
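
/*
 * Sketch of how a peer could be bucketed in the peer_keepalive wheel above,
 * assuming each of the 32 slots covers one second from peer_keepalive_base
 * and the cursor marks the slot currently being serviced.  The helper is
 * hypothetical; the real scheduling logic lives in the peer event code.
 */
static inline unsigned int
rxrpc_example_keepalive_slot(const struct rxrpc_net *rxnet, time64_t keepalive_at)
{
	return (unsigned int)(keepalive_at - rxnet->peer_keepalive_base) %
		ARRAY_SIZE(rxnet->peer_keepalive);
}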

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
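
/*
 * Sketch of how one of the preallocation rings above is consumed, assuming
 * RXRPC_BACKLOG_MAX stays a power of two so the indices can simply be masked.
 * This helper is hypothetical; the real code additionally uses
 * acquire/release ordering so data_ready can race safely with the refiller.
 */
static inline struct rxrpc_call *rxrpc_example_pop_call(struct rxrpc_backlog *b)
{
	unsigned short tail = b->call_backlog_tail;

	if (tail == b->call_backlog_head)
		return NULL;	/* Ring is empty - nothing preallocated */
	b->call_backlog_tail = (tail + 1) & (RXRPC_BACKLOG_MAX - 1);
	return b->call_backlog[tail];
}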

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call;	/* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call;	/* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED	0		/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	u16			second_service;	/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16		from;		/* Service ID to upgrade (if not 0) */
		u16		to;		/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* Primary Service/local addresses */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
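
/*
 * Because sk must be the first member of struct rxrpc_sock, rxrpc_sk() is in
 * effect a checked downcast.  A typical (illustrative) use in a socket-layer
 * callback:
 *
 *	static void example_data_ready(struct sock *sk)
 *	{
 *		struct rxrpc_sock *rx = rxrpc_sk(sk);
 *		...
 *	}
 */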

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;
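
/*
 * Sketch of normalising the on-wire header (struct rxrpc_wire_header from
 * "protocol.h") into the host-byteorder form above, which the input path
 * must do before comparing any multibyte fields.  The helper name is an
 * assumption for illustration.
 */
static inline void rxrpc_example_host_header(struct rxrpc_host_header *hdr,
					     const struct rxrpc_wire_header *whdr)
{
	hdr->epoch		= ntohl(whdr->epoch);
	hdr->cid		= ntohl(whdr->cid);
	hdr->callNumber		= ntohl(whdr->callNumber);
	hdr->seq		= ntohl(whdr->seq);
	hdr->serial		= ntohl(whdr->serial);
	hdr->type		= whdr->type;
	hdr->flags		= whdr->flags;
	hdr->userStatus		= whdr->userStatus;
	hdr->securityIndex	= whdr->securityIndex;
	hdr->cksum		= ntohs(whdr->cksum);
	hdr->serviceId		= ntohs(whdr->serviceId);
}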

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	atomic_t	nr_ring_pins;		/* Number of rxtx ring pins */
	u8		nr_subpackets;		/* Number of subpackets */
	u8		rx_flags;		/* Received packet flags */
#define RXRPC_SKB_INCL_LAST	0x01		/* - Includes last packet */
#define RXRPC_SKB_TX_BUFFER	0x02		/* - Is transmit buffer */
	union {
		int		remain;		/* amount of space remaining for next write */

		/* List of requested ACKs on subpackets */
		unsigned long	rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
					   BITS_PER_LONG];
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
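
/*
 * The private data lives in sk_buff::cb, which caps it at 48 bytes (hence
 * the size note on struct rxrpc_skb_priv above).  A compile-time guard in
 * the following style - an illustrative assumption, mirroring a common
 * kernel idiom - would catch the struct outgrowing the control buffer:
 */
static inline void rxrpc_example_skb_priv_check(void)
{
	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
		     sizeof(((struct sk_buff *)0)->cb));
}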

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */
	u32			no_key_abort;	/* Abort code indicating no key */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Free crypto request on a call */
	void (*free_call_crypto)(struct rxrpc_call *);
	/* Locate the data in a received packet that has been verified. */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
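
/*
 * A security class is plugged in by filling in one of the above tables with
 * its operations.  Roughly (an illustrative sketch), the null class would
 * look like:
 *
 *	const struct rxrpc_security rxrpc_no_security = {
 *		.name			= "none",
 *		.security_index		= RXRPC_SECURITY_NONE,
 *		.init_connection_security = none_init_connection_security,
 *		.secure_packet		= none_secure_packet,
 *		.verify_packet		= none_verify_packet,
 *		...
 *	};
 */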

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;		/* Used to free the endpoint after an RCU grace period */
	atomic_t		active_users;	/* Number of users of the local endpoint */
	atomic_t		usage;		/* Number of references to the structure */
	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
	struct list_head	link;		/* Link in rxnet->local_endpoints */
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;	/* Event-processing work item */
	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
|
rxrpc: Rewrite the client connection manager
Rewrite the rxrpc client connection manager so that it can support multiple
connections for a given security key to a peer. The following changes are
made:
(1) For each open socket, the code currently maintains an rbtree with the
connections placed into it, keyed by communications parameters. This
is tricky to maintain as connections can be culled from the tree or
replaced within it. Connections can require replacement for a number
of reasons, e.g. their IDs span too great a range for the IDR data
type to represent efficiently, the call ID numbers on that conn would
overflow or the conn got aborted.
This is changed so that there's now a connection bundle object placed
in the tree, keyed on the same parameters. The bundle, however, does
not need to be replaced.
(2) An rxrpc_bundle object can now manage the available channels for a set
of parallel connections. The lock that manages this is moved there
from the rxrpc_connection struct (channel_lock).
(3) There's a dummy bundle for all incoming connections to share so that
they have a channel_lock too. It might be better to give each
incoming connection its own bundle. This bundle is not needed to
manage which channels incoming calls are made on because that's
solely at the whim of the client.
(4) The restrictions on how many client connections are around are
removed. Instead, a previous patch limits the number of client calls
that can be allocated. Ordinarily, client connections are reaped
after 2 minutes on the idle queue, but when more than a certain number
of connections are in existence, the reaper starts reaping them after
2s of idleness instead to get the numbers back down.
It could also be made such that new call allocations are forced to
wait until the number of outstanding connections subsides.
Signed-off-by: David Howells <dhowells@redhat.com>
	struct rb_root		client_bundles;	/* Client connection bundles by socket params */
	spinlock_t		client_bundles_lock; /* Lock for client_bundles */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	bool			service_closed;	/* Service socket closed */
	struct sockaddr_rxrpc	srx;		/* local address */
};
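
Because the local endpoint is pointed to by the transport socket's
sk_user_data (see the comment above), it can be recovered from a socket
with a cast; a hypothetical helper, assuming sk_user_data was set when the
transport socket was created:

static inline struct rxrpc_local *example_sk_to_local(struct sock *sk)
{
	/* Assumption: sk_user_data holds the owning rxrpc_local. */
	return (struct rxrpc_local *)sk->sk_user_data;
}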

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct rb_root		service_conns;	/* Service connections */
	struct list_head	keepalive_link;	/* Link in net->peer_keepalive[] */
	time64_t		last_tx_at;	/* Last time packet sent here */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32

rxrpc: Fix the packet reception routine
The rxrpc_input_packet() function and its call tree were built around the
assumption that the data_ready() handler, called from UDP to inform a
kernel service that there is data to be had, was non-reentrant. This
meant that certain locking could be dispensed with.
This, however, turns out not to be the case with a multi-queue network card
that can deliver packets to multiple cpus simultaneously. Each of those
cpus can be in the rxrpc_input_packet() function at the same time.
Fix by adding or changing some structure members:
(1) Add peer->rtt_input_lock to serialise access to the RTT buffer.
(2) Make conn->service_id into a 32-bit variable so that it can be
cmpxchg'd on all arches.
(3) Add call->input_lock to serialise access to the Rx/Tx state. Note
that although the Rx and Tx states are (almost) entirely separate,
there's no point completing the separation and having separate locks
since it's a bi-phasal RPC protocol rather than a bi-directional
streaming protocol. Data transmission and data reception do not take
place simultaneously on any particular call.
and making the following functional changes:
(1) In rxrpc_input_data(), hold call->input_lock around the core to
prevent simultaneous producing of packets into the Rx ring and
updating of tracking state for a particular call.
(2) In rxrpc_input_ping_response(), only read call->ping_serial once, and
check it before checking RXRPC_CALL_PINGING as that's a cheaper test.
The bit test and bit clear can then be combined. No further locking
is needed here.
(3) In rxrpc_input_ack(), take call->input_lock after we've parsed much of
the ACK packet. The superseded ACK check is then done both before and
after the lock is taken.
The handling of ackinfo data is split, parsing before the lock is taken
and processing with it held. This is keyed on rxMTU being non-zero.
Congestion management is also done within the locked section.
(4) In rxrpc_input_ackall(), take call->input_lock around the Tx window
rotation. The ACKALL packet carries no information and is only really
useful after all packets have been transmitted since it's imprecise.
(5) In rxrpc_input_implicit_end_call(), we use rx->incoming_lock to
prevent calls being simultaneously implicitly ended on two cpus and
also to prevent any races with incoming call setup.
(6) In rxrpc_input_packet(), use cmpxchg() to effect the service upgrade
on a connection. It is only permitted to happen once for a
connection.
(7) In rxrpc_new_incoming_call(), we have to recheck the routing inside
rx->incoming_lock to see if someone else set up the call, connection
or peer whilst we were getting there. We can't trust the values from
the earlier routing check unless we pin refs on them - which we want
to avoid.
Further, we need to allow for an incoming call to have its state
changed on another CPU between us making it live and us adjusting it
because the conn is now in the RXRPC_CONN_SERVICE state.
(8) In rxrpc_peer_add_rtt(), take peer->rtt_input_lock around the access
to the RTT buffer. Don't need to lock around setting peer->rtt.
For reference, the inventory of state-accessing or state-altering functions
used by the packet input procedure is:
> rxrpc_input_packet()
* PACKET CHECKING
* ROUTING
> rxrpc_post_packet_to_local()
> rxrpc_find_connection_rcu() - uses RCU
> rxrpc_lookup_peer_rcu() - uses RCU
> rxrpc_find_service_conn_rcu() - uses RCU
> idr_find() - uses RCU
* CONNECTION-LEVEL PROCESSING
- Service upgrade
- Can only happen once per conn
! Changed to use cmpxchg
> rxrpc_post_packet_to_conn()
- Setting conn->hi_serial
- Probably safe not using locks
- Maybe use cmpxchg
* CALL-LEVEL PROCESSING
> Old-call checking
> rxrpc_input_implicit_end_call()
> rxrpc_call_completed()
> rxrpc_queue_call()
! Need to take rx->incoming_lock
> __rxrpc_disconnect_call()
> rxrpc_notify_socket()
> rxrpc_new_incoming_call()
- Uses rx->incoming_lock for the entire process
- Might be able to drop this earlier in favour of the call lock
> rxrpc_incoming_call()
! Conflicts with rxrpc_input_implicit_end_call()
> rxrpc_send_ping()
- Don't need locks to check rtt state
> rxrpc_propose_ACK
* PACKET DISTRIBUTION
> rxrpc_input_call_packet()
> rxrpc_input_data()
* QUEUE DATA PACKET ON CALL
> rxrpc_reduce_call_timer()
- Uses timer_reduce()
! Needs call->input_lock()
> rxrpc_receiving_reply()
! Needs locking around ack state
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_proto_abort()
> rxrpc_input_dup_data()
- Fills the Rx buffer
- rxrpc_propose_ACK()
- rxrpc_notify_socket()
> rxrpc_input_ack()
* APPLY ACK PACKET TO CALL AND DISCARD PACKET
> rxrpc_input_ping_response()
- Probably doesn't need any extra locking
! Need READ_ONCE() on call->ping_serial
> rxrpc_input_check_for_lost_ack()
- Takes call->lock to consult Tx buffer
> rxrpc_peer_add_rtt()
! Needs to take a lock (peer->rtt_input_lock)
! Could perhaps manage with cmpxchg() and xadd() instead
> rxrpc_input_requested_ack
- Consults Tx buffer
! Probably needs a lock
> rxrpc_peer_add_rtt()
> rxrpc_propose_ack()
> rxrpc_input_ackinfo()
- Changes call->tx_winsize
! Use cmpxchg to handle change
! Should perhaps track serial number
- Uses peer->lock to record MTU specification changes
> rxrpc_proto_abort()
! Need to take call->input_lock
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_input_soft_acks()
- Consults the Tx buffer
> rxrpc_congestion_management()
- Modifies the Tx annotations
! Needs call->input_lock()
> rxrpc_queue_call()
> rxrpc_input_abort()
* APPLY ABORT PACKET TO CALL AND DISCARD PACKET
> rxrpc_set_call_completion()
> rxrpc_notify_socket()
> rxrpc_input_ackall()
* APPLY ACKALL PACKET TO CALL AND DISCARD PACKET
! Need to take call->input_lock
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_reject_packet()
There are some functions used by the above that queue the packet, after
which the procedure is terminated:
- rxrpc_post_packet_to_local()
- local->event_queue is an sk_buff_head
- local->processor is a work_struct
- rxrpc_post_packet_to_conn()
- conn->rx_queue is an sk_buff_head
- conn->processor is a work_struct
- rxrpc_reject_packet()
- local->reject_queue is an sk_buff_head
- local->processor is a work_struct
And some that offload processing to process context:
- rxrpc_notify_socket()
- Uses RCU lock
- Uses call->notify_lock to call call->notify_rx
- Uses call->recvmsg_lock to queue recvmsg side
- rxrpc_queue_call()
- call->processor is a work_struct
- rxrpc_propose_ACK()
- Uses call->lock to wrap __rxrpc_propose_ACK()
And a bunch that complete a call, all of which use call->state_lock to
protect the call state:
- rxrpc_call_completed()
- rxrpc_set_call_completion()
- rxrpc_abort_call()
- rxrpc_proto_abort()
- Also uses rxrpc_queue_call()
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	unsigned int		rtt_count;	/* Number of samples we've got */

	u32			srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32			mdev_us;	/* medium deviation */
	u32			mdev_max_us;	/* maximal mdev for the last rtt period */
	u32			rttvar_us;	/* smoothed mdev_max */
	u32			rto_j;		/* Retransmission timeout in jiffies */
	u8			backoff;	/* Backoff timeout */

	u8			cong_cwnd;	/* Congestion window size */
};
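
The srtt_us/mdev_us fields follow the classic TCP-style (Van Jacobson)
estimator, with SRTT kept left-shifted by 3 as the comment notes.  A
simplified sketch of one update step (not the exact rxrpc code, which also
maintains mdev_max_us and rttvar_us per RTT period):

static void example_rtt_update(struct rxrpc_peer *peer, u32 sample_us)
{
	long m = sample_us;

	if (peer->srtt_us == 0) {
		peer->srtt_us = m << 3;		/* first sample: SRTT = m */
		peer->mdev_us = m << 1;		/* conservative initial deviation */
	} else {
		m -= (peer->srtt_us >> 3);	/* m becomes the error term */
		peer->srtt_us += m;		/* SRTT += (sample - SRTT) / 8 */
		if (m < 0)
			m = -m;
		m -= (peer->mdev_us >> 2);
		peer->mdev_us += m;		/* mdev += (|error| - mdev) / 4 */
	}
}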

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64	index_key;
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};
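
The anonymous union above lets { epoch, cid } be matched as one 64-bit
value.  A hedged illustration of building an equivalent composite key; the
bit placement produced by the union overlay itself depends on host
endianness, so the explicit shift form below only shows the idea:

static inline u64 example_conn_index_key(u32 epoch, u32 cid)
{
	/* Pack both halves of the match key into a single comparable word. */
	return ((u64)epoch << 32) | cid;
}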

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states:
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need to do.
There are a couple of flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))
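
A hedged illustration of consulting the mask; the helper name is
hypothetical and it is shown here for exposition only, since struct
rxrpc_connection is not defined until further down the file:

static inline bool example_conn_needs_final_ack(const struct rxrpc_connection *conn)
{
	/* Non-zero if any of channels 0-3 still owes a terminal ACK. */
	return conn->flags & RXRPC_CONN_FINAL_ACK_MASK;
}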

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC client connection bundle.
 */
struct rxrpc_bundle {
	struct rxrpc_conn_parameters params;
	atomic_t		usage;
	unsigned int		debug_id;
	bool			try_upgrade;	/* True if the bundle is attempting upgrade */
	bool			alloc_conn;	/* True if someone's getting a conn */
	short			alloc_error;	/* Error from last conn allocation */
	spinlock_t		channel_lock;
	struct rb_node		local_node;	/* Node in local->client_conns */
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	unsigned long		avail_chans;	/* Mask of available channels */
	struct rxrpc_connection	*conns[4];	/* The connections in the bundle (max 4) */
};
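
A hedged sketch of claiming a free channel from avail_chans, assuming each
connection owns RXRPC_MAXCALLS consecutive bits of the mask starting at its
bundle_shift (a field of struct rxrpc_connection below); the function name
is hypothetical and locking is elided:

static int example_claim_channel(struct rxrpc_bundle *bundle,
				 unsigned int bundle_shift)
{
	unsigned long avail;
	unsigned int chan;

	avail = (bundle->avail_chans >> bundle_shift) &
		((1UL << RXRPC_MAXCALLS) - 1);	/* this conn's four channels */
	if (!avail)
		return -ENOSPC;			/* no free channel on this conn */
	chan = __ffs(avail);			/* lowest free channel index */
	bundle->avail_chans &= ~(1UL << (bundle_shift + chan));
	return chan;
}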

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;
	atomic_t		usage;
	struct rcu_head		rcu;
	struct list_head	cache_link;

rxrpc: Call channels should have separate call number spaces
Each channel on a connection has a separate, independent number space from
which to allocate callNumber values. It is entirely possible, for example,
to have a connection with four active calls, each with call number 1.
Note that the callNumber values for any particular channel don't have to
start at 1, but they are supposed to increment monotonically for that
channel from a client's perspective and may not be reused once the call
number is transmitted (until the epoch cycles all the way back round).
Currently, however, call numbers are allocated on a per-connection basis
and, further, are held in an rb-tree. The rb-tree is redundant as the four
channel pointers in the rxrpc_connection struct are entirely capable of
pointing to all the calls currently in progress on a connection.
To this end, make the following changes:
(1) Handle call number allocation independently per channel.
(2) Get rid of the conn->calls rb-tree. This is overkill as a connection
may have a maximum of four calls in progress at any one time. Use the
pointers in the channels[] array instead, indexed by the channel
number from the packet.
(3) For each channel, save the result of the last call that was in
progress on that channel in conn->channels[] so that the final ACK or
ABORT packet can be replayed if necessary. Any call earlier than that
is just ignored. If we've seen the next call number in a packet, the
last one is most definitely defunct.
(4) When generating a RESPONSE packet for a connection, the call number
counter for each channel must be included in it.
(5) When parsing a RESPONSE packet for a connection, the call number
counters contained therein should be used to set the minimum expected
call numbers on each channel.
To do in future commits:
(1) Replay terminal packets based on the last call stored in
conn->channels[].
(2) Connections should be retired before the callNumber space on any
channel runs out.
(3) A server is expected to disregard or reject any new incoming call that
has a call number less than the current call number counter. The call
number counter for that channel must be advanced to the new call
number.
Note that the server cannot just require that the next call that it
sees on a channel be exactly the call number counter + 1 because then
there's a scenario that could cause a problem: The client transmits a
packet to initiate a connection, the network goes out, the server
sends an ACK (which gets lost), the client sends an ABORT (which also
gets lost); the network then reconnects, the client then reuses the
call number for the next call (it doesn't know the server already saw
the call number), but the server thinks it already has the first
packet of this call (it doesn't know that the client doesn't know that
it saw the call number the first time).
Signed-off-by: David Howells <dhowells@redhat.com>
	unsigned char		act_chans;	/* Mask of active channels */
	struct rxrpc_channel {
		unsigned long	final_ack_at;	/* Time at which to issue final ACK */
		struct rxrpc_call __rcu	*call;	/* Active call */
		unsigned int	call_debug_id;	/* call->debug_id */
		u32		call_id;	/* ID of current call */
		u32		call_counter;	/* Call ID counter */
		u32		last_call;	/* ID of last call */
		u8		last_type;	/* Type of last packet */
		union {
			u32	last_seq;
			u32	last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list	timer;		/* Conn event timer */
	struct work_struct	processor;	/* connection event processor */
	struct rxrpc_bundle	*bundle;	/* Client connection bundle */
	struct rb_node		service_node;	/* Node in peer->service_conns */
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_sync_skcipher *cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			abort_code;	/* Abort code of connection abort */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* response re-use preventer */
rxrpc: Fix the packet reception routine
The rxrpc_input_packet() function and its call tree was built around the
assumption that data_ready() handler called from UDP to inform a kernel
service that there is data to be had was non-reentrant. This means that
certain locking could be dispensed with.
This, however, turns out not to be the case with a multi-queue network card
that can deliver packets to multiple cpus simultaneously. Each of those
cpus can be in the rxrpc_input_packet() function at the same time.
Fix by adding or changing some structure members:
(1) Add peer->rtt_input_lock to serialise access to the RTT buffer.
(2) Make conn->service_id into a 32-bit variable so that it can be
cmpxchg'd on all arches.
(3) Add call->input_lock to serialise access to the Rx/Tx state. Note
that although the Rx and Tx states are (almost) entirely separate,
there's no point completing the separation and having separate locks
since it's a bi-phasal RPC protocol rather than a bi-direction
streaming protocol. Data transmission and data reception do not take
place simultaneously on any particular call.
and making the following functional changes:
(1) In rxrpc_input_data(), hold call->input_lock around the core to
prevent simultaneous producing of packets into the Rx ring and
updating of tracking state for a particular call.
(2) In rxrpc_input_ping_response(), only read call->ping_serial once, and
check it before checking RXRPC_CALL_PINGING as that's a cheaper test.
The bit test and bit clear can then be combined. No further locking
is needed here.
(3) In rxrpc_input_ack(), take call->input_lock after we've parsed much of
the ACK packet. The superseded ACK check is then done both before and
after the lock is taken.
The handing of ackinfo data is split, parsing before the lock is taken
and processing with it held. This is keyed on rxMTU being non-zero.
Congestion management is also done within the locked section.
(4) In rxrpc_input_ackall(), take call->input_lock around the Tx window
rotation. The ACKALL packet carries no information and is only really
useful after all packets have been transmitted since it's imprecise.
(5) In rxrpc_input_implicit_end_call(), we use rx->incoming_lock to
prevent calls being simultaneously implicitly ended on two cpus and
also to prevent any races with incoming call setup.
(6) In rxrpc_input_packet(), use cmpxchg() to effect the service upgrade
on a connection. It is only permitted to happen once for a
connection.
(7) In rxrpc_new_incoming_call(), we have to recheck the routing inside
rx->incoming_lock to see if someone else set up the call, connection
or peer whilst we were getting there. We can't trust the values from
the earlier routing check unless we pin refs on them - which we want
to avoid.
Further, we need to allow for an incoming call to have its state
changed on another CPU between us making it live and us adjusting it
because the conn is now in the RXRPC_CONN_SERVICE state.
(8) In rxrpc_peer_add_rtt(), take peer->rtt_input_lock around the access
to the RTT buffer. Don't need to lock around setting peer->rtt.
For reference, the inventory of state-accessing or state-altering functions
used by the packet input procedure is:
> rxrpc_input_packet()
* PACKET CHECKING
* ROUTING
> rxrpc_post_packet_to_local()
> rxrpc_find_connection_rcu() - uses RCU
> rxrpc_lookup_peer_rcu() - uses RCU
> rxrpc_find_service_conn_rcu() - uses RCU
> idr_find() - uses RCU
* CONNECTION-LEVEL PROCESSING
- Service upgrade
- Can only happen once per conn
! Changed to use cmpxchg
> rxrpc_post_packet_to_conn()
- Setting conn->hi_serial
- Probably safe not using locks
- Maybe use cmpxchg
* CALL-LEVEL PROCESSING
> Old-call checking
> rxrpc_input_implicit_end_call()
> rxrpc_call_completed()
> rxrpc_queue_call()
! Need to take rx->incoming_lock
> __rxrpc_disconnect_call()
> rxrpc_notify_socket()
> rxrpc_new_incoming_call()
- Uses rx->incoming_lock for the entire process
- Might be able to drop this earlier in favour of the call lock
> rxrpc_incoming_call()
! Conflicts with rxrpc_input_implicit_end_call()
> rxrpc_send_ping()
- Don't need locks to check rtt state
> rxrpc_propose_ACK
* PACKET DISTRIBUTION
> rxrpc_input_call_packet()
> rxrpc_input_data()
* QUEUE DATA PACKET ON CALL
> rxrpc_reduce_call_timer()
- Uses timer_reduce()
! Needs call->input_lock()
> rxrpc_receiving_reply()
! Needs locking around ack state
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_proto_abort()
> rxrpc_input_dup_data()
- Fills the Rx buffer
- rxrpc_propose_ACK()
- rxrpc_notify_socket()
> rxrpc_input_ack()
* APPLY ACK PACKET TO CALL AND DISCARD PACKET
> rxrpc_input_ping_response()
- Probably doesn't need any extra locking
! Need READ_ONCE() on call->ping_serial
> rxrpc_input_check_for_lost_ack()
- Takes call->lock to consult Tx buffer
> rxrpc_peer_add_rtt()
! Needs to take a lock (peer->rtt_input_lock)
! Could perhaps manage with cmpxchg() and xadd() instead
> rxrpc_input_requested_ack
- Consults Tx buffer
! Probably needs a lock
> rxrpc_peer_add_rtt()
> rxrpc_propose_ack()
> rxrpc_input_ackinfo()
- Changes call->tx_winsize
! Use cmpxchg to handle change
! Should perhaps track serial number
- Uses peer->lock to record MTU specification changes
> rxrpc_proto_abort()
! Need to take call->input_lock
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_input_soft_acks()
- Consults the Tx buffer
> rxrpc_congestion_management()
- Modifies the Tx annotations
! Needs call->input_lock()
> rxrpc_queue_call()
> rxrpc_input_abort()
* APPLY ABORT PACKET TO CALL AND DISCARD PACKET
> rxrpc_set_call_completion()
> rxrpc_notify_socket()
> rxrpc_input_ackall()
* APPLY ACKALL PACKET TO CALL AND DISCARD PACKET
! Need to take call->input_lock
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_reject_packet()
There are some functions used by the above that queue the packet, after
which the procedure is terminated:
- rxrpc_post_packet_to_local()
- local->event_queue is an sk_buff_head
- local->processor is a work_struct
- rxrpc_post_packet_to_conn()
- conn->rx_queue is an sk_buff_head
- conn->processor is a work_struct
- rxrpc_reject_packet()
- local->reject_queue is an sk_buff_head
- local->processor is a work_struct
And some that offload processing to process context:
- rxrpc_notify_socket()
- Uses RCU lock
- Uses call->notify_lock to call call->notify_rx
- Uses call->recvmsg_lock to queue recvmsg side
- rxrpc_queue_call()
- call->processor is a work_struct
- rxrpc_propose_ACK()
- Uses call->lock to wrap __rxrpc_propose_ACK()
And a bunch that complete a call, all of which use call->state_lock to
protect the call state:
- rxrpc_call_completed()
- rxrpc_set_call_completion()
- rxrpc_abort_call()
- rxrpc_proto_abort()
- Also uses rxrpc_queue_call()
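For the cmpxchg() points noted above (the once-only service upgrade and
the tx_winsize change), a minimal sketch of the lockless once-only
update, assuming the upgrade rewrites conn->service_id from the original
ID to the upgraded one (old_id and new_id are illustrative locals):

	/* Attempt the service upgrade exactly once, without a lock */
	if (cmpxchg(&conn->service_id, old_id, new_id) == old_id) {
		/* We won the race and performed the upgrade */
	}

cmpxchg() returns the value that was previously in the field, so only
one CPU can ever observe old_id and thereby apply the change.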
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
2018-10-08 22:46:25 +08:00
|
|
|
u32 service_id; /* Service ID, possibly upgraded */
|
2007-04-27 06:48:28 +08:00
|
|
|
u8 size_align; /* data size alignment (for security) */
|
|
|
|
u8 security_size; /* security header size */
|
|
|
|
u8 security_ix; /* security type */
|
|
|
|
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
|
rxrpc: Rewrite the client connection manager
Rewrite the rxrpc client connection manager so that it can support multiple
connections for a given security key to a peer. The following changes are
made:
(1) For each open socket, the code currently maintains an rbtree with the
connections placed into it, keyed by communications parameters. This
is tricky to maintain as connections can be culled from the tree or
replaced within it. Connections can require replacement for a number
of reasons, e.g. their IDs span too great a range for the IDR data
type to represent efficiently, the call ID numbers on that conn would
overflow or the conn got aborted.
This is changed so that there's now a connection bundle object placed
in the tree, keyed on the same parameters. The bundle, however, does
not need to be replaced.
(2) An rxrpc_bundle object can now manage the available channels for a set
of parallel connections. The lock that manages this is moved there
from the rxrpc_connection struct (channel_lock).
(3) There's a dummy bundle for all incoming connections to share so that
they have a channel_lock too. It might be better to give each
incoming connection its own bundle. This bundle is not needed to
manage which channels incoming calls are made on because that's
solely at the whim of the client.
(4) The restrictions on how many client connections are around are
removed. Instead, a previous patch limits the number of client calls
that can be allocated. Ordinarily, client connections are reaped
after 2 minutes on the idle queue, but when more than a certain number
of connections are in existence, the reaper starts reaping them after
2s of idleness instead to get the numbers back down.
It could also be made such that new call allocations are forced to
wait until the number of outstanding connections subsides.
Signed-off-by: David Howells <dhowells@redhat.com>
2020-07-01 18:15:32 +08:00
|
|
|
u8 bundle_shift; /* Index into bundle->avail_chans */
|
2018-10-08 22:46:17 +08:00
|
|
|
short error; /* Local error code */
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
|
2018-09-27 22:13:08 +08:00
|
|
|
static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
|
|
|
|
{
|
|
|
|
return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
|
|
|
|
{
|
|
|
|
return !rxrpc_to_server(sp);
|
|
|
|
}
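A usage sketch of the two helpers above when routing an incoming packet
(the rxrpc_skb() accessor to reach the sk_buff's private data is
assumed here):

	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	if (rxrpc_to_server(sp)) {
		/* Client-initiated packet: route to our service side */
	} else {
		/* Reply traffic heading back to one of our client calls */
	}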
|
|
|
|
|
2016-03-04 23:53:46 +08:00
|
|
|
/*
|
|
|
|
* Flags in call->flags.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_flag {
|
|
|
|
RXRPC_CALL_RELEASED, /* call has been released - no more messages to userspace */
|
|
|
|
RXRPC_CALL_HAS_USERID, /* has a user ID attached */
|
2016-08-23 22:27:24 +08:00
|
|
|
RXRPC_CALL_IS_SERVICE, /* Call is service call */
|
rxrpc: Improve management and caching of client connection objects
Improve the management and caching of client rxrpc connection objects.
From this point, client connections will be managed separately from service
connections because AF_RXRPC controls the creation and re-use of client
connections but doesn't have that luxury with service connections.
Further, there will be limits on the numbers of client connections that may
be live on a machine. No direct restriction will be placed on the number
of client calls, excepting that each client connection can support a
maximum of four concurrent calls.
Note that, for a number of reasons, we don't want to simply discard a
client connection as soon as the last call is apparently finished:
(1) Security is negotiated per-connection and the context is then shared
between all calls on that connection. The context can be negotiated
again if the connection lapses, but that involves holding up calls
whilst at least two packets are exchanged and various crypto bits are
performed - so we'd ideally like to cache it for a little while at
least.
(2) If a packet goes astray, we will need to retransmit a final ACK or
ABORT packet. To make this work, we need to keep around the
connection details for a little while.
(3) The locally held structures represent some amount of setup time, to be
weighed against their occupation of memory when idle.
To this end, the client connection cache is managed by a state machine on
each connection. There are five states (sketched as an enum after this message):
(1) INACTIVE - The connection is not held in any list and may not have
been exposed to the world. If it has been previously exposed, it was
discarded from the idle list after expiring.
(2) WAITING - The connection is waiting for the number of client conns to
drop below the maximum capacity. Calls may be in progress upon it
from when it was active and got culled.
The connection is on the rxrpc_waiting_client_conns list which is kept
in to-be-granted order. Culled conns with waiters go to the back of
the queue just like new conns.
(3) ACTIVE - The connection has at least one call in progress upon it, it
may freely grant available channels to new calls and calls may be
waiting on it for channels to become available.
The connection is on the rxrpc_active_client_conns list which is kept
in activation order for culling purposes.
(4) CULLED - The connection got summarily culled to try and free up
capacity. Calls currently in progress on the connection are allowed
to continue, but new calls will have to wait. There can be no waiters
in this state - the conn would have to go to the WAITING state
instead.
(5) IDLE - The connection has no calls in progress upon it and must have
been exposed to the world (ie. the EXPOSED flag must be set). When it
expires, the EXPOSED flag is cleared and the connection transitions to
the INACTIVE state.
The connection is on the rxrpc_idle_client_conns list which is kept in
order of how soon they'll expire.
A connection in the ACTIVE or CULLED state must have at least one active
call upon it; if in the WAITING state it may have active calls upon it;
other states may not have active calls.
As long as a connection remains active and doesn't get culled, it may
continue to process calls - even if there are connections on the wait
queue. This simplifies things a bit and reduces the amount of checking we
need to do.
There are a couple of flags of relevance to the cache:
(1) EXPOSED - The connection ID got exposed to the world. If this flag is
set, an extra ref is added to the connection preventing it from being
reaped when it has no calls outstanding. This flag is cleared and the
ref dropped when a conn is discarded from the idle list.
(2) DONT_REUSE - The connection should be discarded as soon as possible and
should not be reused.
This commit also provides a number of new settings:
(*) /proc/net/rxrpc/max_client_conns
The maximum number of live client connections. Above this number, new
connections get added to the wait list and must wait for an active
conn to be culled. Culled connections can be reused, but they will go
to the back of the wait list and have to wait.
(*) /proc/net/rxrpc/reap_client_conns
If the number of desired connections exceeds the maximum above, the
active connection list will be culled until there are only this many
left in it.
(*) /proc/net/rxrpc/idle_conn_expiry
The normal expiry time for a client connection, provided there are
fewer than reap_client_conns of them around.
(*) /proc/net/rxrpc/idle_conn_fast_expiry
The expedited expiry time, used when there are more than
reap_client_conns of them around.
Note that I combined the Tx wait queue with the channel grant wait queue to
save space as only one of these should be in use at once.
Note also that, for the moment, the service connection cache still uses the
old connection management code.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-08-24 14:30:52 +08:00
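Sketched as an enum, the five cache states described in the message
above might look like this (a sketch; the constant names are assumed,
not taken verbatim from the patch):

	enum rxrpc_conn_cache_state {
		RXRPC_CONN_CLIENT_INACTIVE,	/* Not on any list, maybe unexposed */
		RXRPC_CONN_CLIENT_WAITING,	/* Waiting for capacity to free up */
		RXRPC_CONN_CLIENT_ACTIVE,	/* Has at least one call in progress */
		RXRPC_CONN_CLIENT_CULLED,	/* Culled; existing calls may finish */
		RXRPC_CONN_CLIENT_IDLE,		/* No calls; on the idle list */
	};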
|
|
|
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
|
rxrpc: Rewrite the data and ack handling code
Rewrite the data and ack handling code such that:
(1) Parsing of received ACK and ABORT packets and the distribution and the
filing of DATA packets happens entirely within the data_ready context
called from the UDP socket. This allows us to process and discard ACK
and ABORT packets much more quickly (they're no longer stashed on a
queue for a background thread to process).
(2) We avoid calling skb_clone(), pskb_pull() and pskb_trim(). We instead
keep track of the offset and length of the content of each packet in
the sk_buff metadata. This means we don't do any allocation in the
receive path.
(3) Jumbo DATA packet parsing is now done in data_ready context. Rather
than cloning the packet once for each subpacket and pulling/trimming
it, we file the packet multiple times with an annotation for each
indicating which subpacket is there. From that we can directly
calculate the offset and length.
(4) A call's receive queue can be accessed without taking locks (memory
barriers do have to be used, though).
(5) Incoming calls are set up from preallocated resources and immediately
made live. They can then have packets queued upon them and ACKs
generated. If insufficient resources exist, DATA packet #1 is given a
BUSY reply and other DATA packets are discarded.
(6) sk_buffs no longer take a ref on their parent call.
To make this work, the following changes are made:
(1) Each call's receive buffer is now a circular buffer of sk_buff
pointers (rxtx_buffer) rather than a number of sk_buff_heads spread
between the call and the socket. This permits each sk_buff to be in
the buffer multiple times. The receive buffer is reused for the
transmit buffer.
(2) A circular buffer of annotations (rxtx_annotations) is kept parallel
to the data buffer. Transmission phase annotations indicate whether a
buffered packet has been ACK'd or not and whether it needs
retransmission.
Receive phase annotations indicate whether a slot holds a whole packet
or a jumbo subpacket and, if the latter, which subpacket. They also
note whether the packet has been decrypted in place.
(3) DATA packet window tracking is much simplified. Each phase has just
two numbers representing the window (rx_hard_ack/rx_top and
tx_hard_ack/tx_top).
The hard_ack number is the sequence number before base of the window,
representing the last packet the other side says it has consumed.
hard_ack starts from 0 and the first packet is sequence number 1.
The top number is the sequence number of the highest-numbered packet
residing in the buffer. Packets between hard_ack+1 and top are
soft-ACK'd to indicate they've been received, but not yet consumed.
Four macros, before(), before_eq(), after() and after_eq(), are added
to compare sequence numbers within the window (sketched after this
message). This allows the top of the window to wrap when the hard-ack
sequence number gets close to the limit.
Two flags, RXRPC_CALL_RX_LAST and RXRPC_CALL_TX_LAST, are added also
to indicate when rx_top and tx_top point at the packets with the
LAST_PACKET bit set, indicating the end of the phase.
(4) Calls are queued on the socket 'receive queue' rather than packets.
This means that we don't have to invent dummy packets to queue to
indicate abnormal/terminal states and we don't have to keep metadata
packets (such as ABORTs) around.
(5) The offset and length of a (sub)packet's content are now passed to
the verify_packet security op. This is currently expected to decrypt
the packet in place and validate it.
However, there's now nowhere to store the revised offset and length of
the actual data within the decrypted blob (there may be a header and
padding to skip) because an sk_buff may represent multiple packets, so
a locate_data security op is added to retrieve these details from the
sk_buff content when needed.
(6) recvmsg() now has to handle jumbo subpackets, where each subpacket is
individually secured and needs to be individually decrypted. The code
to do this is broken out into rxrpc_recvmsg_data() and shared with the
kernel API. It now iterates over the call's receive buffer rather
than walking the socket receive queue.
Additional changes:
(1) The timers are condensed to a single timer that is set for the soonest
of three timeouts (delayed ACK generation, DATA retransmission and
call lifespan).
(2) Transmission of ACK and ABORT packets is effected immediately from
process-context socket ops/kernel API calls that cause them instead of
them being punted off to a background work item. The data_ready
handler still has to defer to the background, though.
(3) A shutdown op is added to the AF_RXRPC socket so that the AFS
filesystem can shut down the socket and flush its own work items
before closing the socket to deal with any in-progress service calls.
Future additional changes that will need to be considered:
(1) Make sure that a call doesn't hog the front of the queue by receiving
data from the network as fast as userspace is consuming it to the
exclusion of other calls.
(2) Transmit delayed ACKs from within recvmsg() when we've consumed
sufficiently more packets to avoid the background work item needing to
run.
Signed-off-by: David Howells <dhowells@redhat.com>
2016-09-08 18:10:12 +08:00
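The four comparison helpers referenced above can be sketched as
wrap-safe signed subtraction over the 32-bit sequence space (a sketch
under that assumption, not the verbatim patch; rxrpc_seq_t is the u32
sequence type):

	static inline bool before(rxrpc_seq_t seq1, rxrpc_seq_t seq2)
	{
		return (s32)(seq1 - seq2) < 0;
	}
	static inline bool before_eq(rxrpc_seq_t seq1, rxrpc_seq_t seq2)
	{
		return (s32)(seq1 - seq2) <= 0;
	}
	static inline bool after(rxrpc_seq_t seq1, rxrpc_seq_t seq2)
	{
		return (s32)(seq1 - seq2) > 0;
	}
	static inline bool after_eq(rxrpc_seq_t seq1, rxrpc_seq_t seq2)
	{
		return (s32)(seq1 - seq2) >= 0;
	}

Casting the difference to s32 means the comparison stays correct even
when the sequence counter wraps past 0xffffffff.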
|
|
|
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
|
|
|
|
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
|
2016-10-06 15:11:49 +08:00
|
|
|
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
|
2016-09-25 01:05:27 +08:00
|
|
|
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
|
2018-05-11 06:26:00 +08:00
|
|
|
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
|
rxrpc: Fix handling of call quietly cancelled out on server
Sometimes an in-progress call will stop responding on the fileserver when
the fileserver quietly cancels the call with an internally marked abort
(RX_CALL_DEAD), without sending an ABORT to the client.
This causes the client's call to eventually expire from lack of incoming
packets directed its way, which currently leads to it being cancelled
locally with ETIME. Note that it's not currently clear why this
happens, as it's really hard to reproduce.
The rotation policy implemented by kAFS, however, doesn't differentiate
between ETIME meaning we didn't get any response from the server and ETIME
meaning the call got cancelled mid-flow. The latter leads to an oops when
fetching data as the rotation partially resets the afs_read descriptor,
which can result in a cleared page pointer being dereferenced because that
page has already been filled.
Handle this by the following means:
(1) Set a flag on a call when we receive a packet for it.
(2) Store the highest packet serial number so far received for a call
(bearing in mind this may wrap).
(3) If, when the "not received anything recently" timeout expires on a
call, we've received at least one packet for the call and the connection
as a whole has received packets more recently than that call, then
cancel the call locally with ECONNRESET rather than ETIME (a sketch
follows this message).
This indicates that the call was definitely in progress on the server.
(4) In kAFS, if the rotation algorithm sees ECONNRESET rather than ETIME,
don't try the next server, but rather abort the call.
This avoids the oops as we don't try to reuse the afs_read struct.
Rather, as-yet unfetched pages will be reread at a later date.
Also:
(5) Add an rxrpc tracepoint to log detection of the call being reset.
Without this, I occasionally see an oops like the following:
general protection fault: 0000 [#1] SMP PTI
...
RIP: 0010:_copy_to_iter+0x204/0x310
RSP: 0018:ffff8800cae0f828 EFLAGS: 00010206
RAX: 0000000000000560 RBX: 0000000000000560 RCX: 0000000000000560
RDX: ffff8800cae0f968 RSI: ffff8800d58b3312 RDI: 0005080000000000
RBP: ffff8800cae0f968 R08: 0000000000000560 R09: ffff8800ca00f400
R10: ffff8800c36f28d4 R11: 00000000000008c4 R12: ffff8800cae0f958
R13: 0000000000000560 R14: ffff8800d58b3312 R15: 0000000000000560
FS: 00007fdaef108080(0000) GS:ffff8800ca680000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fb28a8fa000 CR3: 00000000d2a76002 CR4: 00000000001606e0
Call Trace:
skb_copy_datagram_iter+0x14e/0x289
rxrpc_recvmsg_data.isra.0+0x6f3/0xf68
? trace_buffer_unlock_commit_regs+0x4f/0x89
rxrpc_kernel_recv_data+0x149/0x421
afs_extract_data+0x1e0/0x798
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_deliver_fs_fetch_data+0x33a/0x5ab
afs_deliver_to_call+0x1ee/0x5e0
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_wait_for_call_to_complete+0x12b/0x52e
? wake_up_q+0x54/0x54
afs_make_call+0x287/0x462
? afs_fs_fetch_data+0x3e6/0x3ed
? rcu_read_lock_sched_held+0x5d/0x63
afs_fs_fetch_data+0x3e6/0x3ed
afs_fetch_data+0xbb/0x14a
afs_readpages+0x317/0x40d
__do_page_cache_readahead+0x203/0x2ba
? ondemand_readahead+0x3a7/0x3c1
ondemand_readahead+0x3a7/0x3c1
generic_file_buffered_read+0x18b/0x62f
__vfs_read+0xdb/0xfe
vfs_read+0xb2/0x137
ksys_read+0x50/0x8c
do_syscall_64+0x7d/0x1a0
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Note the weird value in RDI which is a result of trying to kmap() a NULL
page pointer.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-06-03 09:17:39 +08:00
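A sketch of the expiry-time decision in (3), assuming the highest serial
seen on the call is recorded as call->rx_serial (name assumed) and that
serial comparison must allow for wrap:

	if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
	    (s32)(call->conn->hi_serial - call->rx_serial) > 0) {
		/* The conn saw newer traffic than this call did, so the
		 * server was still answering: the call was cut off.
		 */
		error = -ECONNRESET;
	} else {
		/* Nothing ever arrived for this call */
		error = -ETIME;
	}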
|
|
|
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
|
2018-07-24 00:18:38 +08:00
|
|
|
RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
|
2020-01-31 05:50:36 +08:00
|
|
|
RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
|
2020-07-02 21:59:46 +08:00
|
|
|
RXRPC_CALL_KERNEL, /* The call was made by the kernel */
|
2020-07-01 18:15:32 +08:00
|
|
|
RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */
|
2016-03-04 23:53:46 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Events that can be raised on a call.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_event {
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_EV_ACK, /* need to generate ACK */
|
|
|
|
RXRPC_CALL_EV_ABORT, /* need to generate abort */
|
|
|
|
RXRPC_CALL_EV_RESEND, /* Tx resend required */
|
2016-10-06 15:11:49 +08:00
|
|
|
RXRPC_CALL_EV_PING, /* Ping send required */
|
2017-11-24 18:18:41 +08:00
|
|
|
RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
|
2017-11-24 18:18:42 +08:00
|
|
|
RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
|
2016-03-04 23:53:46 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The states that a call can be in.
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_state {
|
2016-06-17 22:42:35 +08:00
|
|
|
RXRPC_CALL_UNINITIALISED,
|
|
|
|
RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
|
|
|
|
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
|
|
|
|
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
|
2016-09-08 18:10:12 +08:00
|
|
|
RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
|
2016-03-04 23:53:46 +08:00
|
|
|
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
|
|
|
|
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
|
|
|
|
RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
|
|
|
|
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
|
|
|
|
RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
|
2016-08-30 16:49:28 +08:00
|
|
|
RXRPC_CALL_COMPLETE, /* - call complete */
|
|
|
|
NR__RXRPC_CALL_STATES
|
|
|
|
};
|
|
|
|
|
2019-01-11 00:59:13 +08:00
|
|
|
/*
|
|
|
|
* Call completion condition (state == RXRPC_CALL_COMPLETE).
|
|
|
|
*/
|
|
|
|
enum rxrpc_call_completion {
|
|
|
|
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
|
|
|
|
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
|
|
|
|
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
|
|
|
|
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
|
|
|
|
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
|
|
|
|
NR__RXRPC_CALL_COMPLETIONS
|
|
|
|
};
|
|
|
|
|
2016-09-25 01:05:27 +08:00
|
|
|
/*
|
|
|
|
* Call Tx congestion management modes.
|
|
|
|
*/
|
|
|
|
enum rxrpc_congest_mode {
|
|
|
|
RXRPC_CALL_SLOW_START,
|
|
|
|
RXRPC_CALL_CONGEST_AVOIDANCE,
|
|
|
|
RXRPC_CALL_PACKET_LOSS,
|
|
|
|
RXRPC_CALL_FAST_RETRANSMIT,
|
|
|
|
NR__RXRPC_CONGEST_MODES
|
|
|
|
};
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
|
|
|
|
* RxRPC call definition
|
|
|
|
* - matched by { connection, call_id }
|
|
|
|
*/
|
|
|
|
struct rxrpc_call {
|
2016-06-28 00:11:19 +08:00
|
|
|
struct rcu_head rcu;
|
2007-04-27 06:48:28 +08:00
|
|
|
struct rxrpc_connection *conn; /* connection carrying call */
|
2016-08-24 21:31:43 +08:00
|
|
|
struct rxrpc_peer *peer; /* Peer record for remote address */
|
2016-09-07 16:19:31 +08:00
|
|
|
struct rxrpc_sock __rcu *socket; /* socket responsible */
|
2018-03-31 04:05:23 +08:00
|
|
|
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
|
2019-10-07 17:58:29 +08:00
|
|
|
const struct rxrpc_security *security; /* applied security module */
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connecting to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex (a sketch follows this message).
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 23:43:06 +08:00
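A sketch of the entry pattern from (2): the socket lock is held only
long enough to look up the call and take its user_mutex, after which
other calls on the socket can proceed (error handling condensed; the
lookup helper name is assumed):

	/* rx is the rxrpc_sock; the socket lock is currently held */
	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		return -ERESTARTSYS;
	}
	release_sock(&rx->sk);	/* other calls may now make progress */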
|
|
|
struct mutex user_mutex; /* User access mutex */
|
2017-11-24 18:18:41 +08:00
|
|
|
unsigned long ack_at; /* When deferred ACK needs to happen */
|
2017-11-24 18:18:42 +08:00
|
|
|
unsigned long ack_lost_at; /* When ACK is figured as lost */
|
2017-11-24 18:18:41 +08:00
|
|
|
unsigned long resend_at; /* When next resend needs to happen */
|
|
|
|
unsigned long ping_at; /* When next to send a ping */
|
2017-11-24 18:18:42 +08:00
|
|
|
unsigned long keepalive_at; /* When next to send a keepalive ping */
|
2017-11-24 18:18:41 +08:00
|
|
|
unsigned long expect_rx_by; /* When we expect to get a packet by */
|
|
|
|
unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
|
|
|
|
unsigned long expect_term_by; /* When we expect call termination by */
|
|
|
|
u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
|
|
|
|
u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
|
2019-07-30 22:56:57 +08:00
|
|
|
struct skcipher_request *cipher_req; /* Packet cipher request buffer */
|
2016-09-08 18:10:12 +08:00
|
|
|
struct timer_list timer; /* Combined event timer */
|
|
|
|
struct work_struct processor; /* Event processor */
|
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook that indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal (a usage sketch follows this message).
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-31 03:42:14 +08:00
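A hedged usage sketch of the interface function quoted above, pulling a
reply into a local buffer piecemeal (the signature is taken from the
message; the abort handling shown is an assumption):

	u32 abort_code;
	size_t offset = 0;
	char buffer[256];
	int ret;

	/* want_more = true: we expect further data beyond this bufferful */
	ret = rxrpc_kernel_recv_data(sock, call, buffer, sizeof(buffer),
				     &offset, true, &abort_code);
	if (ret == -ECONNABORTED)
		pr_warn("call aborted: %u\n", abort_code);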
|
|
|
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct list_head link; /* link in master call list */
|
2020-07-01 18:15:32 +08:00
|
|
|
struct list_head chan_wait_link; /* Link in conn->bundle->waiting_calls */
|
2016-04-04 21:00:34 +08:00
|
|
|
struct hlist_node error_link; /* link in error distribution list */
|
2016-09-08 18:10:12 +08:00
|
|
|
struct list_head accept_link; /* Link in rx->acceptq */
|
|
|
|
struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
|
|
|
|
struct list_head sock_link; /* Link in rx->sock_calls */
|
|
|
|
struct rb_node sock_node; /* Node in rx->calls */
|
2007-04-27 06:48:28 +08:00
|
|
|
struct sk_buff *tx_pending; /* Tx socket buffer being filled */
|
2016-08-24 14:30:52 +08:00
|
|
|
wait_queue_head_t waitq; /* Wait queue for channel or Tx */
|
2017-06-07 19:40:03 +08:00
|
|
|
s64 tx_total_len; /* Total length left to be transmitted (or -1) */
|
2016-06-27 05:55:24 +08:00
|
|
|
__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
|
2007-04-27 06:48:28 +08:00
|
|
|
unsigned long user_call_ID; /* user-defined call ID */
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned long events;
|
|
|
|
spinlock_t lock;
|
2017-11-02 23:06:08 +08:00
|
|
|
spinlock_t notify_lock; /* Kernel notification lock */
|
2007-04-27 06:48:28 +08:00
|
|
|
rwlock_t state_lock; /* lock for state transition */
|
2016-08-30 16:49:28 +08:00
|
|
|
u32 abort_code; /* Local/remote abort code */
|
|
|
|
int error; /* Local error incurred */
|
2016-09-08 18:10:11 +08:00
|
|
|
enum rxrpc_call_state state; /* current state of call */
|
|
|
|
enum rxrpc_call_completion completion; /* Call completion condition */
|
2007-04-27 06:48:28 +08:00
|
|
|
atomic_t usage;
|
2016-08-23 22:27:24 +08:00
|
|
|
u16 service_id; /* service ID */
|
2016-09-07 22:19:25 +08:00
|
|
|
u8 security_ix; /* Security type */
|
2020-03-13 17:22:09 +08:00
|
|
|
enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
|
2016-08-23 22:27:24 +08:00
|
|
|
u32 call_id; /* call ID on connection */
|
|
|
|
u32 cid; /* connection ID plus channel index */
|
|
|
|
int debug_id; /* debug ID for printks */
|
2016-09-22 07:29:31 +08:00
|
|
|
unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
|
|
|
|
unsigned short rx_pkt_len; /* Current recvmsg packet len */
|
2019-10-31 20:13:46 +08:00
|
|
|
bool rx_pkt_last; /* Current recvmsg packet is last */
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-09-08 18:10:12 +08:00
|
|
|
/* Rx/Tx circular buffer, depending on phase.
|
|
|
|
*
|
|
|
|
* In the Rx phase, packets are annotated with 0 or the number of the
|
|
|
|
* segment of a jumbo packet each buffer refers to. There can be up to
|
|
|
|
* 47 segments in a maximum-size UDP packet.
|
|
|
|
*
|
|
|
|
* In the Tx phase, packets are annotated with which buffers have been
|
|
|
|
* acked.
|
|
|
|
*/
|
|
|
|
#define RXRPC_RXTX_BUFF_SIZE 64
|
|
|
|
#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
|
2018-07-24 00:18:37 +08:00
|
|
|
#define RXRPC_INIT_RX_WINDOW_SIZE 63
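A minimal sketch of how a wrapping sequence number selects a slot in the
Rx/Tx ring described in the comment above (the helper name is
illustrative; rxrpc_seq_t is the u32 sequence type):

	static inline unsigned int rxrpc_rxtx_slot(rxrpc_seq_t seq)
	{
		/* The buffer size is a power of two, so masking
		 * implements seq % RXRPC_RXTX_BUFF_SIZE.
		 */
		return seq & RXRPC_RXTX_BUFF_MASK;
	}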
|
2016-09-08 18:10:12 +08:00
|
|
|
struct sk_buff **rxtx_buffer;
|
|
|
|
u8 *rxtx_annotations;
|
|
|
|
#define RXRPC_TX_ANNO_ACK 0
|
|
|
|
#define RXRPC_TX_ANNO_UNACK 1
|
|
|
|
#define RXRPC_TX_ANNO_NAK 2
|
|
|
|
#define RXRPC_TX_ANNO_RETRANS 3
|
2016-09-22 07:29:32 +08:00
|
|
|
#define RXRPC_TX_ANNO_MASK 0x03
|
2016-09-23 19:39:22 +08:00
|
|
|
#define RXRPC_TX_ANNO_LAST 0x04
|
|
|
|
#define RXRPC_TX_ANNO_RESENT 0x08
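The low two bits of a Tx annotation hold the packet's ACK state and the upper
bits are flags, so updates mask off the state while preserving the flags. A
hedged sketch of the bit manipulation (the particular transition shown is
illustrative, not lifted from the input routines):

#include <assert.h>

#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08

int main(void)
{
	unsigned char anno;

	/* Annotate the final packet of the call as awaiting an ACK. */
	anno = RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_LAST;

	/* A soft-NAK arrives: change the state bits, keep the flags. */
	anno = (anno & ~RXRPC_TX_ANNO_MASK) | RXRPC_TX_ANNO_NAK;

	/* Retransmission additionally marks the packet as resent. */
	anno |= RXRPC_TX_ANNO_RESENT;

	assert((anno & RXRPC_TX_ANNO_MASK) == RXRPC_TX_ANNO_NAK);
	assert(anno & RXRPC_TX_ANNO_LAST);
	assert(anno & RXRPC_TX_ANNO_RESENT);
	return 0;
}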
|
|
|
|
|
2019-08-27 16:51:30 +08:00
|
|
|
#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */
|
2016-09-08 18:10:12 +08:00
|
|
|
#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
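A receive-phase annotation packs the jumbo subpacket number into the low six
bits alongside the verified flag; a small sketch of decoding one (the sample
value is made up):

#include <stdio.h>

#define RXRPC_RX_ANNO_SUBPACKET	0x3f
#define RXRPC_RX_ANNO_VERIFIED	0x80

int main(void)
{
	/* A slot annotated as jumbo subpacket 5, already decrypted. */
	unsigned char anno = 5 | RXRPC_RX_ANNO_VERIFIED;

	printf("subpacket=%u verified=%d\n",
	       anno & RXRPC_RX_ANNO_SUBPACKET,
	       !!(anno & RXRPC_RX_ANNO_VERIFIED)); /* subpacket=5 verified=1 */
	return 0;
}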
|
|
|
|
rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
|
|
|
|
* not hard-ACK'd packet follows this.
|
|
|
|
*/
|
|
|
|
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
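Since hard_ack is the sequence number just below the window base, the number
of packets still in flight is the difference between the two; a sketch of why
plain unsigned subtraction handles sequence wrap (the helper name is
illustrative):

#include <stdio.h>

typedef unsigned int rxrpc_seq_t;  /* local stand-in for the kernel typedef */

/* Packets in flight: transmitted but not yet hard-ACK'd.  Plain
 * unsigned subtraction is wrap-safe for 32-bit sequence numbers.
 */
static rxrpc_seq_t tx_in_flight(rxrpc_seq_t tx_hard_ack, rxrpc_seq_t tx_top)
{
	return tx_top - tx_hard_ack;
}

int main(void)
{
	printf("%u\n", tx_in_flight(0xfffffffeu, 3));	/* 5, across the wrap */
	return 0;
}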
|
2018-11-01 21:39:53 +08:00
|
|
|
u16 tx_backoff; /* Delay to insert due to Tx failure */
|
2016-09-25 01:05:27 +08:00
|
|
|
|
|
|
|
/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
|
|
|
|
* is fixed, we keep these numbers in terms of segments (ie. DATA
|
|
|
|
* packets) rather than bytes.
|
|
|
|
*/
|
|
|
|
#define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
|
|
|
|
u8 cong_cwnd; /* Congestion window size */
|
|
|
|
u8 cong_extra; /* Extra to send for congestion management */
|
|
|
|
u8 cong_ssthresh; /* Slow-start threshold */
|
|
|
|
enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
|
|
|
|
u8 cong_dup_acks; /* Count of ACKs showing missing packets */
|
|
|
|
u8 cong_cumul_acks; /* Cumulative ACK count */
|
|
|
|
ktime_t cong_tstamp; /* Last time cwnd was changed */
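A toy model of the slow-start half of the scheme, counting in segments as the
comment above describes (the thresholds are made up and the real transitions,
including congestion avoidance, live in the ACK-processing code):

#include <stdio.h>

int main(void)
{
	unsigned int cong_cwnd = 2, cong_ssthresh = 8, acked;

	for (acked = 1; acked <= 10; acked++) {
		if (cong_cwnd < cong_ssthresh)
			cong_cwnd++;	/* slow start: +1 segment per ACK */
		/* (congestion avoidance, roughly +1 per RTT, omitted) */
		printf("ack %2u -> cwnd %u\n", acked, cong_cwnd);
	}
	return 0;
}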
|
|
|
|
|
2016-09-08 18:10:12 +08:00
|
|
|
rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
|
|
|
|
* consumed packet follows this.
|
2007-04-27 06:48:28 +08:00
|
|
|
*/
|
2016-09-08 18:10:12 +08:00
|
|
|
rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
|
|
|
|
rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
|
rxrpc: Fix handling of call quietly cancelled out on server
Sometimes an in-progress call will stop responding on the fileserver when
the fileserver quietly cancels the call with an internally marked abort
(RX_CALL_DEAD), without sending an ABORT to the client.
This causes the client's call to eventually expire from lack of incoming
packets directed its way, which currently leads to it being cancelled
locally with ETIME. Note that it's not currently clear as to why this
happens as it's really hard to reproduce.
The rotation policy implemented by kAFS, however, doesn't differentiate
between ETIME meaning we didn't get any response from the server and ETIME
meaning the call got cancelled mid-flow. The latter leads to an oops when
fetching data as the rotation partially resets the afs_read descriptor,
which can result in a cleared page pointer being dereferenced because that
page has already been filled.
Handle this by the following means:
(1) Set a flag on a call when we receive a packet for it.
(2) Store the highest packet serial number so far received for a call
(bearing in mind this may wrap).
(3) If, when the "not received anything recently" timeout expires on a
call, we've received at least one packet for a call and the connection
as a whole has received packets more recently than that call, then
cancel the call locally with ECONNRESET rather than ETIME.
This indicates that the call was definitely in progress on the server.
(4) In kAFS, if the rotation algorithm sees ECONNRESET rather than ETIME,
don't try the next server, but rather abort the call.
This avoids the oops as we don't try to reuse the afs_read struct.
Rather, as-yet ungotten pages will be reread at a later date.
Also:
(5) Add an rxrpc tracepoint to log detection of the call being reset.
Without this, I occasionally see an oops like the following:
general protection fault: 0000 [#1] SMP PTI
...
RIP: 0010:_copy_to_iter+0x204/0x310
RSP: 0018:ffff8800cae0f828 EFLAGS: 00010206
RAX: 0000000000000560 RBX: 0000000000000560 RCX: 0000000000000560
RDX: ffff8800cae0f968 RSI: ffff8800d58b3312 RDI: 0005080000000000
RBP: ffff8800cae0f968 R08: 0000000000000560 R09: ffff8800ca00f400
R10: ffff8800c36f28d4 R11: 00000000000008c4 R12: ffff8800cae0f958
R13: 0000000000000560 R14: ffff8800d58b3312 R15: 0000000000000560
FS: 00007fdaef108080(0000) GS:ffff8800ca680000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fb28a8fa000 CR3: 00000000d2a76002 CR4: 00000000001606e0
Call Trace:
skb_copy_datagram_iter+0x14e/0x289
rxrpc_recvmsg_data.isra.0+0x6f3/0xf68
? trace_buffer_unlock_commit_regs+0x4f/0x89
rxrpc_kernel_recv_data+0x149/0x421
afs_extract_data+0x1e0/0x798
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_deliver_fs_fetch_data+0x33a/0x5ab
afs_deliver_to_call+0x1ee/0x5e0
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_wait_for_call_to_complete+0x12b/0x52e
? wake_up_q+0x54/0x54
afs_make_call+0x287/0x462
? afs_fs_fetch_data+0x3e6/0x3ed
? rcu_read_lock_sched_held+0x5d/0x63
afs_fs_fetch_data+0x3e6/0x3ed
afs_fetch_data+0xbb/0x14a
afs_readpages+0x317/0x40d
__do_page_cache_readahead+0x203/0x2ba
? ondemand_readahead+0x3a7/0x3c1
ondemand_readahead+0x3a7/0x3c1
generic_file_buffered_read+0x18b/0x62f
__vfs_read+0xdb/0xfe
vfs_read+0xb2/0x137
ksys_read+0x50/0x8c
do_syscall_64+0x7d/0x1a0
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Note the weird value in RDI which is a result of trying to kmap() a NULL
page pointer.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-06-03 09:17:39 +08:00
|
|
|
rxrpc_serial_t rx_serial; /* Highest serial received for this call */
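A runnable sketch of the expiry decision described in the commit message
above, assuming wrap-safe serial comparison via signed difference (the helper
and its parameters are illustrative, not the kernel's names):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* If we heard anything on the call but the connection has seen more
 * recent traffic than the call, the server must have quietly killed
 * the call; otherwise nothing was received recently at all.
 */
static int expiry_error(bool rx_heard, unsigned int call_serial,
			unsigned int conn_serial)
{
	if (rx_heard && (int)(conn_serial - call_serial) > 0)
		return -ECONNRESET;	/* call was in progress on the server */
	return -ETIME;			/* no response from the server */
}

int main(void)
{
	printf("%d %d\n", expiry_error(true, 5, 9), expiry_error(false, 0, 0));
	return 0;
}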
|
2016-09-08 18:10:12 +08:00
|
|
|
u8 rx_winsize; /* Size of Rx window */
|
|
|
|
u8 tx_winsize; /* Maximum size of Tx window */
|
2016-09-17 17:49:14 +08:00
|
|
|
bool tx_phase; /* T if transmission phase, F if receive phase */
|
2016-09-14 05:36:22 +08:00
|
|
|
u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
|
2007-04-27 06:48:28 +08:00
|
|
|
|
rxrpc: Fix the packet reception routine
The rxrpc_input_packet() function and its call tree were built around the
assumption that the data_ready() handler, called from UDP to inform a kernel
service that there is data to be had, was non-reentrant. This means that
certain locking could be dispensed with.
This, however, turns out not to be the case with a multi-queue network card
that can deliver packets to multiple cpus simultaneously. Each of those
cpus can be in the rxrpc_input_packet() function at the same time.
Fix by adding or changing some structure members:
(1) Add peer->rtt_input_lock to serialise access to the RTT buffer.
(2) Make conn->service_id into a 32-bit variable so that it can be
cmpxchg'd on all arches.
(3) Add call->input_lock to serialise access to the Rx/Tx state. Note
that although the Rx and Tx states are (almost) entirely separate,
there's no point completing the separation and having separate locks
since it's a bi-phasal RPC protocol rather than a bi-directional
streaming protocol. Data transmission and data reception do not take
place simultaneously on any particular call.
and making the following functional changes:
(1) In rxrpc_input_data(), hold call->input_lock around the core to
prevent simultaneous producing of packets into the Rx ring and
updating of tracking state for a particular call.
(2) In rxrpc_input_ping_response(), only read call->ping_serial once, and
check it before checking RXRPC_CALL_PINGING as that's a cheaper test.
The bit test and bit clear can then be combined. No further locking
is needed here.
(3) In rxrpc_input_ack(), take call->input_lock after we've parsed much of
the ACK packet. The superseded ACK check is then done both before and
after the lock is taken.
The handling of ackinfo data is split: parsing before the lock is taken
and processing with it held. This is keyed on rxMTU being non-zero.
Congestion management is also done within the locked section.
(4) In rxrpc_input_ackall(), take call->input_lock around the Tx window
rotation. The ACKALL packet carries no information and is only really
useful after all packets have been transmitted since it's imprecise.
(5) In rxrpc_input_implicit_end_call(), we use rx->incoming_lock to
prevent calls being simultaneously implicitly ended on two cpus and
also to prevent any races with incoming call setup.
(6) In rxrpc_input_packet(), use cmpxchg() to effect the service upgrade
on a connection. It is only permitted to happen once for a
connection.
(7) In rxrpc_new_incoming_call(), we have to recheck the routing inside
rx->incoming_lock to see if someone else set up the call, connection
or peer whilst we were getting there. We can't trust the values from
the earlier routing check unless we pin refs on them - which we want
to avoid.
Further, we need to allow for an incoming call to have its state
changed on another CPU between us making it live and us adjusting it
because the conn is now in the RXRPC_CONN_SERVICE state.
(8) In rxrpc_peer_add_rtt(), take peer->rtt_input_lock around the access
to the RTT buffer. Don't need to lock around setting peer->rtt.
For reference, the inventory of state-accessing or state-altering functions
used by the packet input procedure is:
> rxrpc_input_packet()
* PACKET CHECKING
* ROUTING
> rxrpc_post_packet_to_local()
> rxrpc_find_connection_rcu() - uses RCU
> rxrpc_lookup_peer_rcu() - uses RCU
> rxrpc_find_service_conn_rcu() - uses RCU
> idr_find() - uses RCU
* CONNECTION-LEVEL PROCESSING
- Service upgrade
- Can only happen once per conn
! Changed to use cmpxchg
> rxrpc_post_packet_to_conn()
- Setting conn->hi_serial
- Probably safe not using locks
- Maybe use cmpxchg
* CALL-LEVEL PROCESSING
> Old-call checking
> rxrpc_input_implicit_end_call()
> rxrpc_call_completed()
> rxrpc_queue_call()
! Need to take rx->incoming_lock
> __rxrpc_disconnect_call()
> rxrpc_notify_socket()
> rxrpc_new_incoming_call()
- Uses rx->incoming_lock for the entire process
- Might be able to drop this earlier in favour of the call lock
> rxrpc_incoming_call()
! Conflicts with rxrpc_input_implicit_end_call()
> rxrpc_send_ping()
- Don't need locks to check rtt state
> rxrpc_propose_ACK
* PACKET DISTRIBUTION
> rxrpc_input_call_packet()
> rxrpc_input_data()
* QUEUE DATA PACKET ON CALL
> rxrpc_reduce_call_timer()
- Uses timer_reduce()
! Needs call->input_lock()
> rxrpc_receiving_reply()
! Needs locking around ack state
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_proto_abort()
> rxrpc_input_dup_data()
- Fills the Rx buffer
- rxrpc_propose_ACK()
- rxrpc_notify_socket()
> rxrpc_input_ack()
* APPLY ACK PACKET TO CALL AND DISCARD PACKET
> rxrpc_input_ping_response()
- Probably doesn't need any extra locking
! Need READ_ONCE() on call->ping_serial
> rxrpc_input_check_for_lost_ack()
- Takes call->lock to consult Tx buffer
> rxrpc_peer_add_rtt()
! Needs to take a lock (peer->rtt_input_lock)
! Could perhaps manage with cmpxchg() and xadd() instead
> rxrpc_input_requested_ack
- Consults Tx buffer
! Probably needs a lock
> rxrpc_peer_add_rtt()
> rxrpc_propose_ack()
> rxrpc_input_ackinfo()
- Changes call->tx_winsize
! Use cmpxchg to handle change
! Should perhaps track serial number
- Uses peer->lock to record MTU specification changes
> rxrpc_proto_abort()
! Need to take call->input_lock
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_input_soft_acks()
- Consults the Tx buffer
> rxrpc_congestion_management()
- Modifies the Tx annotations
! Needs call->input_lock()
> rxrpc_queue_call()
> rxrpc_input_abort()
* APPLY ABORT PACKET TO CALL AND DISCARD PACKET
> rxrpc_set_call_completion()
> rxrpc_notify_socket()
> rxrpc_input_ackall()
* APPLY ACKALL PACKET TO CALL AND DISCARD PACKET
! Need to take call->input_lock
> rxrpc_rotate_tx_window()
> rxrpc_end_tx_phase()
> rxrpc_reject_packet()
There are some functions used by the above that queue the packet, after
which the procedure is terminated:
- rxrpc_post_packet_to_local()
- local->event_queue is an sk_buff_head
- local->processor is a work_struct
- rxrpc_post_packet_to_conn()
- conn->rx_queue is an sk_buff_head
- conn->processor is a work_struct
- rxrpc_reject_packet()
- local->reject_queue is an sk_buff_head
- local->processor is a work_struct
And some that offload processing to process context:
- rxrpc_notify_socket()
- Uses RCU lock
- Uses call->notify_lock to call call->notify_rx
- Uses call->recvmsg_lock to queue recvmsg side
- rxrpc_queue_call()
- call->processor is a work_struct
- rxrpc_propose_ACK()
- Uses call->lock to wrap __rxrpc_propose_ACK()
And a bunch that complete a call, all of which use call->state_lock to
protect the call state:
- rxrpc_call_completed()
- rxrpc_set_call_completion()
- rxrpc_abort_call()
- rxrpc_proto_abort()
- Also uses rxrpc_queue_call()
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
2018-10-08 22:46:25 +08:00
|
|
|
spinlock_t input_lock; /* Lock for packet input to this call */
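Point (6) of the commit message above makes the once-only service upgrade a
compare-and-exchange from the original service ID. A hedged user-space sketch
of that pattern with C11 atomics (the IDs are made up):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint service_id = 100;	/* hypothetical pre-upgrade ID */
	unsigned int expected = 100;

	/* Only the first CAS from the original ID can succeed. */
	if (atomic_compare_exchange_strong(&service_id, &expected, 200))
		printf("upgraded to %u\n", atomic_load(&service_id));

	expected = 100;
	if (!atomic_compare_exchange_strong(&service_id, &expected, 200))
		printf("second upgrade refused; still %u\n",
		       atomic_load(&service_id));
	return 0;
}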
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/* receive-phase ACK management */
|
2009-09-16 15:01:13 +08:00
|
|
|
u8 ackr_reason; /* reason to ACK */
|
2016-03-04 23:53:46 +08:00
|
|
|
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
|
2019-04-12 23:34:16 +08:00
|
|
|
rxrpc_serial_t ackr_first_seq; /* first sequence number received */
|
2016-09-08 18:10:12 +08:00
|
|
|
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
|
2016-09-25 01:05:26 +08:00
|
|
|
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
|
|
|
|
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
|
2016-10-06 15:11:49 +08:00
|
|
|
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACK types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-20 06:29:16 +08:00
|
|
|
/* RTT management */
|
|
|
|
rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
|
|
|
|
ktime_t rtt_sent_at[4]; /* Time packet sent */
|
|
|
|
unsigned long rtt_avail; /* Mask of available slots in bits 0-3,
|
|
|
|
* Mask of pending samples in 8-11 */
|
|
|
|
#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
|
|
|
|
#define RXRPC_CALL_RTT_PEND_SHIFT 8
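A non-atomic sketch of claiming one of the four sample slots using the masks
above (the kernel uses atomic bitops for this; ffs() here is the POSIX one):

#include <stdio.h>
#include <strings.h>

#define RXRPC_CALL_RTT_AVAIL_MASK	0xf
#define RXRPC_CALL_RTT_PEND_SHIFT	8

int main(void)
{
	unsigned long rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK; /* all free */
	int slot = ffs((int)(rtt_avail & RXRPC_CALL_RTT_AVAIL_MASK)) - 1;

	if (slot >= 0) {
		/* Claim the slot and mark its sample as pending until
		 * the matching ACK (or a later serial) arrives.
		 */
		rtt_avail &= ~(1UL << slot);
		rtt_avail |= 1UL << (slot + RXRPC_CALL_RTT_PEND_SHIFT);
	}
	printf("slot=%d rtt_avail=%#lx\n", slot, rtt_avail); /* slot=0 0x10e */
	return 0;
}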
|
2007-04-27 06:48:28 +08:00
|
|
|
|
2016-09-08 18:10:12 +08:00
|
|
|
/* transmission-phase ACK management */
|
2016-09-25 01:05:27 +08:00
|
|
|
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
|
2016-09-25 01:05:26 +08:00
|
|
|
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
|
2017-11-24 18:18:42 +08:00
|
|
|
rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
|
|
|
|
rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
|
2016-09-25 01:05:26 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
2016-09-25 01:05:27 +08:00
|
|
|
* Summary of a new ACK and the changes it made to the Tx buffer packet states.
|
2016-09-25 01:05:26 +08:00
|
|
|
*/
|
|
|
|
struct rxrpc_ack_summary {
|
|
|
|
u8 ack_reason;
|
|
|
|
u8 nr_acks; /* Number of ACKs in packet */
|
|
|
|
u8 nr_nacks; /* Number of NACKs in packet */
|
|
|
|
u8 nr_new_acks; /* Number of new ACKs in packet */
|
|
|
|
u8 nr_new_nacks; /* Number of new NACKs in packet */
|
|
|
|
u8 nr_rot_new_acks; /* Number of rotated new ACKs */
|
|
|
|
bool new_low_nack; /* T if new low NACK found */
|
2016-09-25 01:05:27 +08:00
|
|
|
bool retrans_timeo; /* T if reTx due to timeout happened */
|
|
|
|
u8 flight_size; /* Number of unreceived transmissions */
|
|
|
|
/* Place to stash values for tracing */
|
|
|
|
enum rxrpc_congest_mode mode:8;
|
|
|
|
u8 cwnd;
|
|
|
|
u8 ssthresh;
|
|
|
|
u8 dup_acks;
|
|
|
|
u8 cumulative_acks;
|
2007-04-27 06:48:28 +08:00
|
|
|
};
|
|
|
|
|
2017-11-24 18:18:41 +08:00
|
|
|
/*
|
|
|
|
* sendmsg() cmsg-specified parameters.
|
|
|
|
*/
|
|
|
|
enum rxrpc_command {
|
|
|
|
RXRPC_CMD_SEND_DATA, /* send data message */
|
|
|
|
RXRPC_CMD_SEND_ABORT, /* request abort generation */
|
|
|
|
RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
|
2020-10-01 04:27:18 +08:00
|
|
|
RXRPC_CMD_CHARGE_ACCEPT, /* [server] charge accept preallocation */
|
2017-11-24 18:18:41 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct rxrpc_call_params {
|
|
|
|
s64 tx_total_len; /* Total Tx data length (if send data) */
|
|
|
|
unsigned long user_call_ID; /* User's call ID */
|
|
|
|
struct {
|
|
|
|
u32 hard; /* Maximum lifetime (sec) */
|
|
|
|
u32 idle; /* Max time since last data packet (msec) */
|
|
|
|
u32 normal; /* Max time since last call packet (msec) */
|
|
|
|
} timeouts;
|
|
|
|
u8 nr_timeouts; /* Number of timeouts specified */
|
2020-07-02 21:59:46 +08:00
|
|
|
bool kernel; /* T if kernel is making the call */
|
2020-03-13 17:22:09 +08:00
|
|
|
enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
|
2017-11-24 18:18:41 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct rxrpc_send_params {
|
|
|
|
struct rxrpc_call_params call;
|
|
|
|
u32 abort_code; /* Abort code to Tx (if abort) */
|
|
|
|
enum rxrpc_command command : 8; /* The command to implement */
|
|
|
|
bool exclusive; /* Shared or exclusive call */
|
|
|
|
bool upgrade; /* If the connection is upgradeable */
|
|
|
|
};
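For context, these structures are the parsed, in-kernel form of sendmsg()
control messages. A hedged user-space sketch of tagging a data message with a
user call ID (SOL_RXRPC and RXRPC_USER_CALL_ID come from linux/rxrpc.h; the
helper itself is illustrative):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>

static ssize_t send_request(int fd, void *buf, size_t len,
			    unsigned long user_call_id)
{
	char control[CMSG_SPACE(sizeof(user_call_id))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cmsg;

	/* Attach the RXRPC_USER_CALL_ID control message to the send. */
	memset(control, 0, sizeof(control));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	return sendmsg(fd, &msg, 0);
}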
|
|
|
|
|
2016-08-23 22:27:24 +08:00
|
|
|
#include <trace/events/rxrpc.h>
|
|
|
|
|
2007-04-27 06:48:28 +08:00
|
|
|
/*
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;
/*
* call_accept.c
*/
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_sock *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
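
/*
 * Illustrative sketch (not compiled in): a userspace RXRPC_CMD_CHARGE_ACCEPT
 * control message arriving through sendmsg() is routed to the helper above,
 * roughly as below.  The rx and p variables (the rxrpc_sock and the decoded
 * rxrpc_send_params) are assumed from the caller's context.
 */
#if 0
	if (p.command == RXRPC_CMD_CHARGE_ACCEPT)
		ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
#endif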
/*
* call_event.c
*/
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
					   unsigned long expire_at,
					   unsigned long now,
					   enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}
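
/*
 * Illustrative sketch (not compiled in): shortening the call timer so that an
 * event fires sooner.  timer_reduce() only ever brings the expiry forward, so
 * passing a later deadline than the pending one is a no-op.  The 100ms delay
 * is arbitrary and the trace reason is assumed to be a value of enum
 * rxrpc_timer_trace.
 */
#if 0
static void rx_example_poke_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;

	rxrpc_reduce_call_timer(call, now + HZ / 10, now,
				rxrpc_timer_set_for_ack);
}
#endif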
/*
* call_object.c
*/
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
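
/*
 * Illustrative sketch (not compiled in): the get/put helpers are paired
 * around any use of a call beyond the context that looked it up.  The trace
 * reasons named here are assumed values of enum rxrpc_call_trace.
 */
#if 0
	rxrpc_get_call(call, rxrpc_call_got);	/* Pin the call for our use */
	/* ... operate on the call ... */
	rxrpc_put_call(call, rxrpc_call_put);	/* Release it when done */
#endif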
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}
static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}
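
/*
 * Illustrative sketch (not compiled in): the two predicates above partition
 * calls by direction, so a branch on call origin looks like this.
 */
#if 0
	if (rxrpc_is_service_call(call))
		pr_devel("handling incoming (service) call\n");
	else
		pr_devel("handling outgoing (client) call\n");
#endif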
/*
* conn_client.c
*/
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *);
void rxrpc_put_bundle(struct rxrpc_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
		       struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
		       gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);
/*
* conn_event.c
*/
void rxrpc_process_connection(struct work_struct *);
/*
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *,
						   struct rxrpc_peer **);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}
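
/*
 * Illustrative sketch (not part of the original header): timer_reduce()
 * only ever brings a timer's expiry forward, so this helper can safely be
 * called from several places with different deadlines and the earliest one
 * wins.  The wrapper function below and its one-second deadline are
 * hypothetical.
 */
static inline void rxrpc_example_expire_soon(struct rxrpc_connection *conn)
{
	/* Ask for expiry no later than one second from now. */
	rxrpc_reduce_conn_timer(conn, jiffies + HZ);
}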

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
				   const struct rxrpc_security *, struct key *,
				   struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
int rxrpc_input_packet(struct sock *, struct sk_buff *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
{
	return atomic_dec_return(&local->active_users) == 0;
}

static inline bool __rxrpc_use_local(struct rxrpc_local *local)
{
	return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
}
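
/*
 * Illustrative sketch (not part of the original header): active_users is
 * only ever incremented while non-zero (atomic_fetch_add_unless), so a
 * caller that fails __rxrpc_use_local() knows the endpoint is being torn
 * down and must not touch it.  The function below is hypothetical; queueing
 * the local for cleanup on the last unuse mirrors the pattern the
 * declarations above suggest.
 */
static inline bool rxrpc_example_with_local(struct rxrpc_local *local)
{
	if (!__rxrpc_use_local(local))
		return false;		/* count was zero: local is dying */

	/* ... use the local endpoint here ... */

	if (__rxrpc_unuse_local(local))
		rxrpc_queue_local(local);	/* last user: defer cleanup */
	return true;
}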

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_requested_ack_delay;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;

extern const s8 rxrpc_ack_priority[];

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
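
/*
 * Illustrative sketch (not part of the original header): per-netns state is
 * reached from any rxrpc socket through its network namespace.  This assumes
 * the usual embedding of struct sock as rx->sk; the helper name is
 * hypothetical.
 */
static inline struct rxrpc_net *rxrpc_example_sock_net(struct rxrpc_sock *rx)
{
	return rxrpc_net(sock_net(&rx->sk));
}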

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
void rxrpc_send_keepalive(struct rxrpc_peer *);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
			     struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
void rxrpc_put_peer_locked(struct rxrpc_peer *);
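
/*
 * Illustrative sketch (not part of the original header): the _rcu lookup
 * only pins the peer for the RCU read-side critical section, so a caller
 * that wants to keep the peer must convert that into a real reference
 * before unlocking.  The function below is hypothetical.
 */
static inline struct rxrpc_peer *
rxrpc_example_find_peer(struct rxrpc_local *local,
			const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_peer *peer;

	rcu_read_lock();
	peer = rxrpc_lookup_peer_rcu(local, srx);
	if (peer)
		peer = rxrpc_get_peer_maybe(peer); /* NULL if being freed */
	rcu_read_unlock();
	return peer;	/* caller eventually drops with rxrpc_put_peer() */
}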

/*
 * proc.c
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
bool __rxrpc_call_completed(struct rxrpc_call *);
bool rxrpc_call_completed(struct rxrpc_call *);
bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
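
/*
 * Illustrative sketch (not part of the original header): recording a
 * completion when a call ends normally versus aborting it locally.  The
 * function, the "EXAM" why-tag and the errno chosen are hypothetical;
 * RX_USER_ABORT is a standard Rx abort code.
 */
static inline void rxrpc_example_finish(struct rxrpc_call *call, bool failed)
{
	if (failed)
		rxrpc_abort_call("EXAM", call, 0, RX_USER_ABORT, -ECONNABORTED);
	else
		rxrpc_call_completed(call);
}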

/*
 * Abort a call due to a protocol error.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
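
/*
 * Illustrative sketch (not part of the original header): how a packet
 * parser might reject a malformed packet.  The function, the length check
 * and the why-tags are hypothetical; eproto_why must be a string literal
 * because it goes through tracepoint_string().
 */
static inline bool rxrpc_example_check_len(struct rxrpc_call *call,
					   struct sk_buff *skb)
{
	if (skb->len < 4) {
		rxrpc_abort_eproto(call, skb, "tiny_packet", "XLEN",
				   RX_PROTOCOL_ERROR);
		return false;
	}
	return true;
}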

/*
 * rtt.c
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
				   const struct rxrpc_security **, struct key **,
				   struct sk_buff *);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
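
/*
 * Illustrative sketch (not part of the original header): every get/see/free
 * of an rxrpc-owned sk_buff is paired with an enum rxrpc_skb_trace label so
 * its refcount history shows up in the trace log.  The function below is
 * hypothetical and forwards a caller-supplied label rather than naming a
 * specific enum member.
 */
static inline void rxrpc_example_consume(struct sk_buff *skb,
					 enum rxrpc_skb_trace why)
{
	rxrpc_see_skb(skb, why);	/* log that we looked at it */
	rxrpc_free_skb(skb, why);	/* drop the reference, tracing it */
}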

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
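
/*
 * Illustrative sketch (not part of the original header): the comparisons
 * above are wrap-safe because the subtraction is done modulo 2^32 and then
 * reinterpreted as signed, so a window may straddle the 32-bit wrap point.
 * The function below is hypothetical.
 */
static inline bool rxrpc_example_seq_in_window(u32 seq, u32 hard_ack, u32 top)
{
	/* True for packets after the last hard-acked one up to and
	 * including the highest buffered one; e.g. with hard_ack
	 * 0xfffffffe and top 0x1, sequences 0xffffffff, 0x0 and 0x1
	 * all qualify.
	 */
	return after(seq, hard_ack) && before_eq(seq, top);
}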

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))		\
		kenter(FMT,##__VA_ARGS__);			\
} while (0)

#define _leave(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))		\
		kleave(FMT,##__VA_ARGS__);			\
} while (0)

#define _debug(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))		\
		kdebug(FMT,##__VA_ARGS__);			\
} while (0)

#define _proto(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))		\
		kproto(FMT,##__VA_ARGS__);			\
} while (0)

#define _net(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))		\
		knet(FMT,##__VA_ARGS__);			\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
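
/*
 * Illustrative sketch (not part of the original header): a function
 * instrumented with the macros above.  With __KDEBUG defined the calls
 * always print; under CONFIG_AF_RXRPC_DEBUG they are gated by the
 * rxrpc_debug mask; otherwise no_printk() compiles them away while still
 * type-checking the format arguments.  The function is hypothetical.
 */
static inline int rxrpc_example_traced(int x)
{
	_enter("{%d}", x);
	x += 1;
	_leave(" = %d", x);
	return x;
}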

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */
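
/*
 * Illustrative sketch (not part of the original header): typical use of the
 * assertions above.  ASSERTCMP evaluates its operands once and prints both
 * values before BUG()ing; the cast through __typeof__(X) lets Y be a plain
 * constant.  The function and the limits checked here are hypothetical.
 */
static inline void rxrpc_example_check(unsigned int nr_conns, bool culling)
{
	ASSERTCMP(nr_conns, <, 1000U);
	ASSERTIF(culling, nr_conns > 0);
}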