sunrpc: fix potential races in pool_stats collection

In a later patch, we'll be removing some spinlocking around the socket
and thread queueing code in order to fix some contention problems. At
that point, the stats counters will no longer be protected by the
sp_lock.

Change the counters to atomic_long_t fields, except for the
"sockets_queued" counter which will still be manipulated under a
spinlock.

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Tested-by: Chris Worley <chris.worley@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Author: Jeff Layton, 2014-11-21 14:19:29 -05:00 (committed by J. Bruce Fields)
Parent commit: 812443865c
Commit: 403c7b4444
2 files changed, 9 insertions(+), 9 deletions(-)

View File

@@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *);

 /* statistics for svc_pool structures */
 struct svc_pool_stats {
-	unsigned long	packets;
+	atomic_long_t	packets;
 	unsigned long	sockets_queued;
-	unsigned long	threads_woken;
-	unsigned long	threads_timedout;
+	atomic_long_t	threads_woken;
+	atomic_long_t	threads_timedout;
 };

 /*

View File

@@ -362,7 +362,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)

 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 	spin_lock_bh(&pool->sp_lock);
-	pool->sp_stats.packets++;
+	atomic_long_inc(&pool->sp_stats.packets);

 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
@@ -383,7 +383,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 		svc_xprt_get(xprt);
 		wake_up_process(rqstp->rq_task);
 		rqstp->rq_xprt = xprt;
-		pool->sp_stats.threads_woken++;
+		atomic_long_inc(&pool->sp_stats.threads_woken);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
@@ -669,7 +669,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)

 	spin_lock_bh(&pool->sp_lock);
 	if (!time_left)
-		pool->sp_stats.threads_timedout++;
+		atomic_long_inc(&pool->sp_stats.threads_timedout);

 	xprt = rqstp->rq_xprt;
 	if (!xprt) {
@@ -1306,10 +1306,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)

 	seq_printf(m, "%u %lu %lu %lu %lu\n",
 		pool->sp_id,
-		pool->sp_stats.packets,
+		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
 		pool->sp_stats.sockets_queued,
-		pool->sp_stats.threads_woken,
-		pool->sp_stats.threads_timedout);
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

 	return 0;
 }