RxRPC development
commit e2e80c027f

Merge tag 'rxrpc-next-20180330' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Fixes and more traces

Here are some patches that add some more tracepoints to AF_RXRPC and fix
some issues therein:

 (1) Fix the use of VERSION packets to keep firewall routes open.

 (2) Fix the incorrect current time usage in a tracepoint.

 (3) Fix Tx ring annotation corruption.

 (4) Fix accidental conversion of call-level abort into connection-level
     abort.

 (5) Fix calculation of resend time.

 (6) Remove a couple of unused variables.

 (7) Fix a bunch of checker warnings and an error. Note that not all
     warnings can be quashed as checker doesn't seem to correctly handle
     seqlocks.

 (8) Fix a potential race between call destruction and socket/net
     destruction.

 (9) Add a tracepoint to track rxrpc_local refcounting.

(10) Fix an apparent leak of rxrpc_local objects.

(11) Add a tracepoint to track rxrpc_peer refcounting.

(12) Fix a leak of rxrpc_peer objects.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
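Fixes (9)-(12) share one pattern: the get/put helpers for rxrpc_local and rxrpc_peer move out of line and emit a tracepoint recording the object's debug ID, the operation, the new usage count and the caller's return address, so an unbalanced reference can be matched to its call site from the trace log, and leftover records can be reported at teardown. What follows is only a minimal user-space sketch of that idea (simplified types, printf standing in for the tracepoint); the kernel code itself uses atomic_t, __builtin_return_address(0) and trace_rxrpc_local()/trace_rxrpc_peer(), as shown in the hunks below.

/* Sketch only -- not the kernel implementation. */
#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int usage;
    unsigned int debug_id;
};

/* Stand-in for trace_rxrpc_peer()/trace_rxrpc_local(): log the object,
 * the operation, the resulting refcount and the caller's address.
 */
static void trace_ref(struct obj *obj, const char *op, int n, void *where)
{
    printf("P=%08x %s u=%d sp=%p\n", obj->debug_id, op, n, where);
}

static struct obj *obj_get(struct obj *obj)
{
    int n = atomic_fetch_add(&obj->usage, 1) + 1;

    trace_ref(obj, "GOT", n, __builtin_return_address(0));
    return obj;
}

static void obj_put(struct obj *obj)
{
    if (obj) {
        int n = atomic_fetch_sub(&obj->usage, 1) - 1;

        trace_ref(obj, "PUT", n, __builtin_return_address(0));
        if (n == 0) {
            /* Final ref gone: this is where __rxrpc_put_peer()-style
             * teardown would run.
             */
        }
    }
}

Read that way, a leak shows up as a final "GOT" with no matching "PUT" from the same call site, which is how the added rxrpc_local/rxrpc_peer tracepoints are intended to be used.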
@@ -42,6 +42,22 @@ enum rxrpc_skb_trace {
rxrpc_skb_tx_seen,
};

enum rxrpc_local_trace {
rxrpc_local_got,
rxrpc_local_new,
rxrpc_local_processing,
rxrpc_local_put,
rxrpc_local_queued,
};

enum rxrpc_peer_trace {
rxrpc_peer_got,
rxrpc_peer_new,
rxrpc_peer_processing,
rxrpc_peer_put,
rxrpc_peer_queued_error,
};

enum rxrpc_conn_trace {
rxrpc_conn_got,
rxrpc_conn_new_client,

@@ -215,6 +231,20 @@ enum rxrpc_congest_change {
EM(rxrpc_skb_tx_rotated, "Tx ROT") \
E_(rxrpc_skb_tx_seen, "Tx SEE")

#define rxrpc_local_traces \
EM(rxrpc_local_got, "GOT") \
EM(rxrpc_local_new, "NEW") \
EM(rxrpc_local_processing, "PRO") \
EM(rxrpc_local_put, "PUT") \
E_(rxrpc_local_queued, "QUE")

#define rxrpc_peer_traces \
EM(rxrpc_peer_got, "GOT") \
EM(rxrpc_peer_new, "NEW") \
EM(rxrpc_peer_processing, "PRO") \
EM(rxrpc_peer_put, "PUT") \
E_(rxrpc_peer_queued_error, "QER")

#define rxrpc_conn_traces \
EM(rxrpc_conn_got, "GOT") \
EM(rxrpc_conn_new_client, "NWc") \

@@ -416,6 +446,7 @@ enum rxrpc_congest_change {
#define E_(a, b) TRACE_DEFINE_ENUM(a);

rxrpc_skb_traces;
rxrpc_local_traces;
rxrpc_conn_traces;
rxrpc_client_traces;
rxrpc_call_traces;

@@ -439,6 +470,60 @@ rxrpc_congest_changes;
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }

TRACE_EVENT(rxrpc_local,
TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
int usage, const void *where),

TP_ARGS(local, op, usage, where),

TP_STRUCT__entry(
__field(unsigned int, local )
__field(int, op )
__field(int, usage )
__field(const void *, where )
),

TP_fast_assign(
__entry->local = local->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
),

TP_printk("L=%08x %s u=%d sp=%pSR",
__entry->local,
__print_symbolic(__entry->op, rxrpc_local_traces),
__entry->usage,
__entry->where)
);

TRACE_EVENT(rxrpc_peer,
TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
int usage, const void *where),

TP_ARGS(peer, op, usage, where),

TP_STRUCT__entry(
__field(unsigned int, peer )
__field(int, op )
__field(int, usage )
__field(const void *, where )
),

TP_fast_assign(
__entry->peer = peer->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
),

TP_printk("P=%08x %s u=%d sp=%pSR",
__entry->peer,
__print_symbolic(__entry->op, rxrpc_peer_traces),
__entry->usage,
__entry->where)
);

TRACE_EVENT(rxrpc_conn,
TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
int usage, const void *where),

@@ -324,6 +324,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
mutex_unlock(&call->user_mutex);
}

rxrpc_put_peer(cp.peer);
_leave(" = %p", call);
return call;
}

@@ -447,6 +448,7 @@ int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);

mutex_unlock(&call->user_mutex);
rxrpc_put_peer(cp.peer);
_leave(" = %d", ret);
return ret;
}

@@ -762,6 +764,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct rxrpc_net *rxnet;
struct rxrpc_sock *rx;
struct sock *sk;

@@ -801,6 +804,9 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
rwlock_init(&rx->call_lock);
memset(&rx->srx, 0, sizeof(rx->srx));

rxnet = rxrpc_net(sock_net(&rx->sk));
timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

_leave(" = 0 [%p]", rx);
return 0;
}

@@ -75,7 +75,9 @@ struct rxrpc_net {
u32 epoch; /* Local epoch for detecting local-end reset */
struct list_head calls; /* List of calls active in this namespace */
rwlock_t call_lock; /* Lock for ->calls */
atomic_t nr_calls; /* Count of allocated calls */

atomic_t nr_conns;
struct list_head conn_proc_list; /* List of conns in this namespace for proc */
struct list_head service_conns; /* Service conns in this namespace */
rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */

@@ -97,8 +99,16 @@ struct rxrpc_net {
struct list_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */

spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
DECLARE_HASHTABLE (peer_hash, 10);
spinlock_t peer_hash_lock; /* Lock for ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
u8 peer_keepalive_cursor;
ktime_t peer_keepalive_base;
struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
struct hlist_head peer_keepalive_new;
struct timer_list peer_keepalive_timer;
struct work_struct peer_keepalive_work;
};

/*

@@ -285,6 +295,8 @@ struct rxrpc_peer {
struct hlist_head error_targets; /* targets for net error distribution */
struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */
struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */
time64_t last_tx_at; /* Last time packet sent here */
seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */
unsigned int if_mtu; /* interface MTU for this peer */

@@ -518,6 +530,7 @@ struct rxrpc_call {
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
struct mutex user_mutex; /* User access mutex */
unsigned long ack_at; /* When deferred ACK needs to happen */
unsigned long ack_lost_at; /* When ACK is figured as lost */

@@ -969,31 +982,12 @@ extern void rxrpc_process_local_events(struct rxrpc_local *);
* local_object.c
*/
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline void rxrpc_get_local(struct rxrpc_local *local)
{
atomic_inc(&local->usage);
}

static inline
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
return atomic_inc_not_zero(&local->usage) ? local : NULL;
}

static inline void rxrpc_put_local(struct rxrpc_local *local)
{
if (local && atomic_dec_and_test(&local->usage))
__rxrpc_put_local(local);
}

static inline void rxrpc_queue_local(struct rxrpc_local *local)
{
rxrpc_queue_work(&local->processor);
}

/*
* misc.c
*/

@@ -1026,6 +1020,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
void rxrpc_send_keepalive(struct rxrpc_peer *);

/*
* peer_event.c

@@ -1034,6 +1029,7 @@ void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
* peer_object.c

@@ -1045,25 +1041,11 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
struct rxrpc_peer *);

static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
atomic_inc(&peer->usage);
return peer;
}

static inline
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
}

extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
{
if (peer && atomic_dec_and_test(&peer->usage))
__rxrpc_put_peer(peer);
}
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
void __rxrpc_queue_peer_error(struct rxrpc_peer *);

/*
* proc.c

@@ -138,6 +138,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);

rxnet = call->rxnet;
write_lock(&rxnet->call_lock);
list_add_tail(&call->link, &rxnet->calls);
write_unlock(&rxnet->call_lock);

@@ -218,6 +219,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
list_del(&conn->proc_link);
write_unlock(&rxnet->conn_lock);
kfree(conn);
if (atomic_dec_and_test(&rxnet->nr_conns))
wake_up_atomic_t(&rxnet->nr_conns);
tail = (tail + 1) & (size - 1);
}

@@ -225,7 +228,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->call_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_call *call = b->call_backlog[tail];
call->socket = rx;
rcu_assign_pointer(call->socket, rx);
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);

@@ -295,8 +298,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
b->conn_backlog[conn_tail] = NULL;
smp_store_release(&b->conn_backlog_tail,
(conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
rxrpc_get_local(local);
conn->params.local = local;
conn->params.local = rxrpc_get_local(local);
conn->params.peer = peer;
rxrpc_see_connection(conn);
rxrpc_new_incoming_connection(rx, conn, skb);

@@ -456,6 +458,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
unsigned long user_call_ID,
rxrpc_notify_rx_t notify_rx)
__releases(&rx->sk.sk_lock.slock)
__acquires(call->user_mutex)
{
struct rxrpc_call *call;
struct rb_node *parent, **pp;

@@ -226,7 +226,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}

resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
resend_at += jiffies + rxrpc_resend_timeout;
WRITE_ONCE(call->resend_at, resend_at);

@@ -238,7 +238,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
* retransmitting data.
*/
if (!retrans) {
rxrpc_reduce_call_timer(call, resend_at, now,
rxrpc_reduce_call_timer(call, resend_at, now_j,
rxrpc_timer_set_for_resend);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);

@@ -103,6 +103,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
unsigned int debug_id)
{
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
if (!call)

@@ -153,6 +154,9 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,

call->cong_cwnd = 2;
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

call->rxnet = rxnet;
atomic_inc(&rxnet->nr_calls);
return call;

nomem_2:

@@ -219,9 +223,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
gfp_t gfp,
unsigned int debug_id)
__releases(&rx->sk.sk_lock.slock)
__acquires(&call->user_mutex)
{
struct rxrpc_call *call, *xcall;
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
struct rxrpc_net *rxnet;
struct rb_node *parent, **pp;
const void *here = __builtin_return_address(0);
int ret;

@@ -271,6 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,

write_unlock(&rx->call_lock);

rxnet = call->rxnet;
write_lock(&rxnet->call_lock);
list_add_tail(&call->link, &rxnet->calls);
write_unlock(&rxnet->call_lock);

@@ -616,7 +622,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
*/
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
struct rxrpc_net *rxnet;
struct rxrpc_net *rxnet = call->rxnet;
const void *here = __builtin_return_address(0);
int n;

@@ -630,7 +636,6 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

if (!list_empty(&call->link)) {
rxnet = rxrpc_net(sock_net(&call->socket->sk));
write_lock(&rxnet->call_lock);
list_del_init(&call->link);
write_unlock(&rxnet->call_lock);

@@ -646,11 +651,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
struct rxrpc_net *rxnet = call->rxnet;

rxrpc_put_peer(call->peer);
kfree(call->rxtx_buffer);
kfree(call->rxtx_annotations);
kmem_cache_free(rxrpc_call_jar, call);
if (atomic_dec_and_test(&rxnet->nr_calls))
wake_up_atomic_t(&rxnet->nr_calls);
}

/*

@@ -715,4 +723,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
}

write_unlock(&rxnet->call_lock);

atomic_dec(&rxnet->nr_calls);
wait_on_atomic_t(&rxnet->nr_calls, atomic_t_wait, TASK_UNINTERRUPTIBLE);
}

@@ -207,6 +207,7 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
if (ret < 0)
goto error_2;

atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
write_unlock(&rxnet->conn_lock);

@@ -776,7 +777,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
struct rxrpc_channel *chan = &conn->channels[channel];
struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));
struct rxrpc_net *rxnet = conn->params.local->rxnet;

trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;

@@ -136,6 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
}

kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
conn->params.peer->last_tx_at = ktime_get_real();
_leave("");
return;
}

@@ -239,6 +240,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return -EAGAIN;
}

conn->params.peer->last_tx_at = ktime_get_real();

_leave(" = 0");
return 0;
}

@@ -365,6 +365,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
key_put(conn->params.key);
key_put(conn->server_key);
rxrpc_put_peer(conn->params.peer);

if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
wake_up_atomic_t(&conn->params.local->rxnet->nr_conns);
rxrpc_put_local(conn->params.local);

kfree(conn);

@@ -418,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
*/
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue;
trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);

if (rxrpc_conn_is_client(conn))
BUG();

@@ -458,6 +461,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)

_enter("");

atomic_dec(&rxnet->nr_conns);
rxrpc_destroy_all_client_connections(rxnet);

del_timer_sync(&rxnet->service_conn_reap_timer);

@@ -475,5 +479,9 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)

ASSERT(list_empty(&rxnet->conn_proc_list));

/* We need to wait for the connections to be destroyed by RCU as they
* pin things that we still need to get rid of.
*/
wait_on_atomic_t(&rxnet->nr_conns, atomic_t_wait, TASK_UNINTERRUPTIBLE);
_leave("");
}

@@ -132,6 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
atomic_set(&conn->usage, 2);

atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
list_add_tail(&conn->link, &rxnet->service_conns);
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);

@@ -1183,6 +1183,8 @@ void rxrpc_data_ready(struct sock *udp_sk)

switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
goto discard;
rxrpc_post_packet_to_local(local, skb);
goto out;

@@ -1240,16 +1242,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
goto discard_unlock;

if (sp->hdr.callNumber == chan->last_call) {
/* For the previous service call, if completed successfully, we
* discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
(chan->last_type == RXRPC_PACKET_TYPE_ACK ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
goto discard_unlock;

/* But otherwise we need to retransmit the final packet from
* data cached in the connection record.
/* For the previous service call, if completed
* successfully, we discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK)
goto discard_unlock;

/* But otherwise we need to retransmit the final packet
* from data cached in the connection record.
*/
rxrpc_post_packet_to_conn(conn, skb);
goto out_unlock;

@@ -95,6 +95,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
}

_leave(" = %p", local);

@@ -256,15 +257,74 @@ addr_in_use:
return ERR_PTR(-EADDRINUSE);
}

/*
* Get a ref on a local endpoint.
*/
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
int n;

n = atomic_inc_return(&local->usage);
trace_rxrpc_local(local, rxrpc_local_got, n, here);
return local;
}

/*
* Get a ref on a local endpoint unless its usage has already reached 0.
*/
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);

if (local) {
int n = __atomic_add_unless(&local->usage, 1, 0);
if (n > 0)
trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
else
local = NULL;
}
return local;
}

/*
* Queue a local endpoint.
*/
void rxrpc_queue_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);

if (rxrpc_queue_work(&local->processor))
trace_rxrpc_local(local, rxrpc_local_queued,
atomic_read(&local->usage), here);
}

/*
* A local endpoint reached its end of life.
*/
void __rxrpc_put_local(struct rxrpc_local *local)
static void __rxrpc_put_local(struct rxrpc_local *local)
{
_enter("%d", local->debug_id);
rxrpc_queue_work(&local->processor);
}

/*
* Drop a ref on a local endpoint.
*/
void rxrpc_put_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
int n;

if (local) {
n = atomic_dec_return(&local->usage);
trace_rxrpc_local(local, rxrpc_local_put, n, here);

if (n == 0)
__rxrpc_put_local(local);
}
}

/*
* Destroy a local endpoint's socket and then hand the record to RCU to dispose
* of.

@@ -322,7 +382,8 @@ static void rxrpc_local_processor(struct work_struct *work)
container_of(work, struct rxrpc_local, processor);
bool again;

_enter("%d", local->debug_id);
trace_rxrpc_local(local, rxrpc_local_processing,
atomic_read(&local->usage), NULL);

do {
again = false;

@@ -32,13 +32,22 @@ static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
rxrpc_queue_work(&rxnet->service_conn_reaper);
}

static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
{
struct rxrpc_net *rxnet =
container_of(timer, struct rxrpc_net, peer_keepalive_timer);

if (rxnet->live)
rxrpc_queue_work(&rxnet->peer_keepalive_work);
}

/*
* Initialise a per-network namespace record.
*/
static __net_init int rxrpc_init_net(struct net *net)
{
struct rxrpc_net *rxnet = rxrpc_net(net);
int ret;
int ret, i;

rxnet->live = true;
get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));

@@ -46,7 +55,9 @@ static __net_init int rxrpc_init_net(struct net *net)

INIT_LIST_HEAD(&rxnet->calls);
rwlock_init(&rxnet->call_lock);
atomic_set(&rxnet->nr_calls, 1);

atomic_set(&rxnet->nr_conns, 1);
INIT_LIST_HEAD(&rxnet->conn_proc_list);
INIT_LIST_HEAD(&rxnet->service_conns);
rwlock_init(&rxnet->conn_lock);

@@ -70,8 +81,16 @@ static __net_init int rxrpc_init_net(struct net *net)

INIT_LIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex);

hash_init(rxnet->peer_hash);
spin_lock_init(&rxnet->peer_hash_lock);
for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
timer_setup(&rxnet->peer_keepalive_timer,
rxrpc_peer_keepalive_timeout, 0);
INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);

ret = -ENOMEM;
rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);

@@ -95,8 +114,11 @@ static __net_exit void rxrpc_exit_net(struct net *net)
struct rxrpc_net *rxnet = rxrpc_net(net);

rxnet->live = false;
del_timer_sync(&rxnet->peer_keepalive_timer);
cancel_work_sync(&rxnet->peer_keepalive_work);
rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet);
rxrpc_destroy_all_peers(rxnet);
rxrpc_destroy_all_locals(rxnet);
proc_remove(rxnet->proc_net);
}

@@ -32,6 +32,8 @@ struct rxrpc_abort_buffer {
__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep

@@ -122,6 +124,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
struct kvec iov[2];
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
ktime_t now;
size_t len, n;
int ret;
u8 reason;

@@ -203,8 +206,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
}

ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
now = ktime_get_real();
if (ping)
call->ping_time = ktime_get_real();
call->ping_time = now;
conn->params.peer->last_tx_at = ktime_get_real();

if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {

@@ -288,6 +293,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)

ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, 1, sizeof(pkt));
conn->params.peer->last_tx_at = ktime_get_real();

rxrpc_put_connection(conn);
return ret;

@@ -378,6 +384,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* message and update the peer record
*/
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_real();

up_read(&conn->params.local->defrag_sem);
if (ret == -EMSGSIZE)

@@ -429,6 +436,7 @@ send_fragmentable:
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_real();

opt = IP_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, SOL_IP,

@@ -446,6 +454,7 @@ send_fragmentable:
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_real();

opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket,

@@ -515,3 +524,51 @@ void rxrpc_reject_packets(struct rxrpc_local *local)

_leave("");
}

/*
* Send a VERSION reply to a peer as a keepalive.
*/
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
struct rxrpc_wire_header whdr;
struct msghdr msg;
struct kvec iov[2];
size_t len;
int ret;

_enter("");

msg.msg_name = &peer->srx.transport;
msg.msg_namelen = peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;

whdr.epoch = htonl(peer->local->rxnet->epoch);
whdr.cid = 0;
whdr.callNumber = 0;
whdr.seq = 0;
whdr.serial = 0;
whdr.type = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
whdr.flags = RXRPC_LAST_PACKET;
whdr.userStatus = 0;
whdr.securityIndex = 0;
whdr._rsvd = 0;
whdr.serviceId = 0;

iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = (char *)rxrpc_keepalive_string;
iov[1].iov_len = sizeof(rxrpc_keepalive_string);

len = iov[0].iov_len + iov[1].iov_len;

_proto("Tx VERSION (keepalive)");

ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
if (ret < 0)
_debug("sendmsg failed: %d", ret);

peer->last_tx_at = ktime_get_real();
_leave("");
}

@@ -192,7 +192,7 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

/* The ref we obtained is passed off to the work item */
rxrpc_queue_work(&peer->error_distributor);
__rxrpc_queue_peer_error(peer);
_leave("");
}

@@ -348,3 +348,99 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
usage, avg);
}

/*
* Perform keep-alive pings with VERSION packets to keep any NAT alive.
*/
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
struct rxrpc_net *rxnet =
container_of(work, struct rxrpc_net, peer_keepalive_work);
struct rxrpc_peer *peer;
unsigned long delay;
ktime_t base, now = ktime_get_real();
s64 diff;
u8 cursor, slot;

base = rxnet->peer_keepalive_base;
cursor = rxnet->peer_keepalive_cursor;

_enter("%u,%lld", cursor, ktime_sub(now, base));

next_bucket:
diff = ktime_to_ns(ktime_sub(now, base));
if (diff < 0)
goto resched;

_debug("at %u", cursor);
spin_lock_bh(&rxnet->peer_hash_lock);
next_peer:
if (!rxnet->live) {
spin_unlock_bh(&rxnet->peer_hash_lock);
goto out;
}

/* Everything in the bucket at the cursor is processed this second; the
* bucket at cursor + 1 goes now + 1s and so on...
*/
if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
if (hlist_empty(&rxnet->peer_keepalive_new)) {
spin_unlock_bh(&rxnet->peer_hash_lock);
goto emptied_bucket;
}

hlist_move_list(&rxnet->peer_keepalive_new,
&rxnet->peer_keepalive[cursor]);
}

peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
struct rxrpc_peer, keepalive_link);
hlist_del_init(&peer->keepalive_link);
if (!rxrpc_get_peer_maybe(peer))
goto next_peer;

spin_unlock_bh(&rxnet->peer_hash_lock);

_debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);

recalc:
diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
if (diff < -30 || diff > 30)
goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
diff += RXRPC_KEEPALIVE_TIME - 1;
if (diff < 0)
goto send;

slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
if (slot == 0)
goto send;

/* A transmission to this peer occurred since last we examined it so
* put it into the appropriate future bucket.
*/
slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
spin_lock_bh(&rxnet->peer_hash_lock);
hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
rxrpc_put_peer(peer);
goto next_peer;

send:
rxrpc_send_keepalive(peer);
now = ktime_get_real();
goto recalc;

emptied_bucket:
cursor++;
if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
cursor = 0;
base = ktime_add_ns(base, NSEC_PER_SEC);
goto next_bucket;

resched:
rxnet->peer_keepalive_base = base;
rxnet->peer_keepalive_cursor = cursor;
delay = nsecs_to_jiffies(-diff) + 1;
timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
out:
_leave("");
}

@@ -322,6 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
if (!peer) {
peer = prealloc;
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
}

spin_unlock(&rxnet->peer_hash_lock);

@@ -363,9 +364,12 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
if (!peer)
if (!peer) {
hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key);
hlist_add_head(&candidate->keepalive_link,
&rxnet->peer_keepalive_new);
}

spin_unlock_bh(&rxnet->peer_hash_lock);

@@ -382,9 +386,54 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
}

/*
* Discard a ref on a remote peer record.
* Get a ref on a peer record.
*/
void __rxrpc_put_peer(struct rxrpc_peer *peer)
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
int n;

n = atomic_inc_return(&peer->usage);
trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
return peer;
}

/*
* Get a ref on a peer record unless its usage has already reached 0.
*/
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);

if (peer) {
int n = __atomic_add_unless(&peer->usage, 1, 0);
if (n > 0)
trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
else
peer = NULL;
}
return peer;
}

/*
* Queue a peer record. This passes the caller's ref to the workqueue.
*/
void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
int n;

n = atomic_read(&peer->usage);
if (rxrpc_queue_work(&peer->error_distributor))
trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
else
rxrpc_put_peer(peer);
}

/*
* Discard a peer record.
*/
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
struct rxrpc_net *rxnet = peer->local->rxnet;

@@ -392,11 +441,49 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer)

spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
hlist_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);

kfree_rcu(peer, rcu);
}

/*
* Drop a ref on a peer record.
*/
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
int n;

if (peer) {
n = atomic_dec_return(&peer->usage);
trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
if (n == 0)
__rxrpc_put_peer(peer);
}
}

/*
* Make sure all peer records have been discarded.
*/
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
{
struct rxrpc_peer *peer;
int i;

for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
if (hlist_empty(&rxnet->peer_hash[i]))
continue;

hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
pr_err("Leaked peer %u {%u} %pISp\n",
peer->debug_id,
atomic_read(&peer->usage),
&peer->srx.transport);
}
}
}

/**
* rxrpc_kernel_get_peer - Get the peer address of a call
* @sock: The socket on which the call is in progress.

@@ -29,6 +29,8 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
* generate a list of extant and dead calls in /proc/net/rxrpc_calls
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
__acquires(rcu)
__acquires(rxnet->call_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

@@ -45,6 +47,8 @@ static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
__releases(rxnet->call_lock)
__releases(rcu)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

@@ -135,6 +139,7 @@ const struct file_operations rxrpc_call_seq_fops = {
* generate a list of extant virtual connections in /proc/net/rxrpc_conns
*/
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
__acquires(rxnet->conn_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

@@ -151,6 +156,7 @@ static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
__releases(rxnet->conn_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

@@ -668,6 +668,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
return -EAGAIN;
}

conn->params.peer->last_tx_at = ktime_get_real();
_leave(" = 0");
return 0;
}

@@ -722,6 +723,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
return -EAGAIN;
}

conn->params.peer->last_tx_at = ktime_get_real();
_leave(" = 0");
return 0;
}

@@ -19,9 +19,6 @@
#include <keys/rxrpc-type.h>
#include "ar-internal.h"

static LIST_HEAD(rxrpc_security_methods);
static DECLARE_RWSEM(rxrpc_security_sem);

static const struct rxrpc_security *rxrpc_security_types[] = {
[RXRPC_SECURITY_NONE] = &rxrpc_no_security,
#ifdef CONFIG_RXKAD

@@ -130,7 +130,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
spin_lock_bh(&call->lock);

if (call->state < RXRPC_CALL_COMPLETE) {
call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
call->rxtx_annotations[ix] =
(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
RXRPC_TX_ANNO_RETRANS;
if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_queue_call(call);
}

@@ -554,6 +556,7 @@ static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
struct rxrpc_send_params *p)
__releases(&rx->sk.sk_lock.slock)
__acquires(&call->user_mutex)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call *call;

@@ -583,6 +586,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
atomic_inc_return(&rxrpc_debug_id));
/* The socket is now unlocked */

rxrpc_put_peer(cp.peer);
_leave(" = %p\n", call);
return call;
}

@@ -594,6 +598,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
*/
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
__releases(&call->user_mutex)
{
enum rxrpc_call_state state;
struct rxrpc_call *call;