SUNRPC: Make num_reqs a non-atomic integer
If recording xprt->stat.max_slots is moved into xprt_alloc_slot, then xprt->num_reqs is never manipulated outside xprt->reserve_lock. There's no longer a need for xprt->num_reqs to be atomic.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 78215759e2
commit ff699ea826
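A minimal userspace sketch of the property this change relies on: once every read and write of the slot counter happens under a single lock, a plain unsigned int is sufficient, and the high-water mark can be recorded at the same point. This is not the kernel code; the names (slot_table, alloc_slot, max_slots_seen) are invented for illustration, and a pthread mutex stands in for the xprt->reserve_lock spinlock.

    /*
     * Userspace sketch (not the kernel code): a plain counter whose every
     * access happens under one lock, so no atomic type is needed, and the
     * high-water mark is recorded at the same point.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct slot_table {
        pthread_mutex_t reserve_lock;   /* plays the role of xprt->reserve_lock */
        unsigned int num_reqs;          /* plain integer: only touched under the lock */
        unsigned int max_reqs;          /* upper bound on slots */
        unsigned int max_slots_seen;    /* plays the role of xprt->stat.max_slots */
    };

    /* Account for one more slot; return false if the table is already full. */
    static bool alloc_slot(struct slot_table *tbl)
    {
        bool ok = false;

        pthread_mutex_lock(&tbl->reserve_lock);
        if (tbl->num_reqs < tbl->max_reqs) {
            ++tbl->num_reqs;
            /* Record the high-water mark under the same lock, which is
             * what moving max_slots into xprt_alloc_slot achieves. */
            if (tbl->num_reqs > tbl->max_slots_seen)
                tbl->max_slots_seen = tbl->num_reqs;
            ok = true;
        }
        pthread_mutex_unlock(&tbl->reserve_lock);
        return ok;
    }

    int main(void)
    {
        struct slot_table tbl = {
            .reserve_lock = PTHREAD_MUTEX_INITIALIZER,
            .max_reqs = 2,
        };

        for (int i = 0; i < 3; i++)
            printf("alloc_slot -> %s\n", alloc_slot(&tbl) ? "ok" : "table full");
        printf("high-water mark: %u\n", tbl.max_slots_seen);
        return 0;
    }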
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -197,7 +197,7 @@ struct rpc_xprt {
 	struct list_head	free;		/* free slots */
 	unsigned int		max_reqs;	/* max number of slots */
 	unsigned int		min_reqs;	/* min number of slots */
-	atomic_t		num_reqs;	/* total slots */
+	unsigned int		num_reqs;	/* total slots */
 	unsigned long		state;		/* transport state */
 	unsigned char		resvport : 1;	/* use a reserved port */
 	atomic_t		swapper;	/* we're swapping over this
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1009,7 +1009,7 @@ void xprt_transmit(struct rpc_task *task)
 	struct rpc_rqst	*req = task->tk_rqstp;
 	struct rpc_xprt	*xprt = req->rq_xprt;
 	unsigned int connect_cookie;
-	int status, numreqs;
+	int status;
 
 	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
 
@@ -1047,9 +1047,6 @@ void xprt_transmit(struct rpc_task *task)
 
 	xprt->ops->set_retrans_timeout(task);
 
-	numreqs = atomic_read(&xprt->num_reqs);
-	if (numreqs > xprt->stat.max_slots)
-		xprt->stat.max_slots = numreqs;
 	xprt->stat.sends++;
 	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
 	xprt->stat.bklog_u += xprt->backlog.qlen;
@@ -1111,14 +1108,15 @@ static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
 {
 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
 
-	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
+	if (xprt->num_reqs >= xprt->max_reqs)
 		goto out;
+	++xprt->num_reqs;
 	spin_unlock(&xprt->reserve_lock);
 	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
 	spin_lock(&xprt->reserve_lock);
 	if (req != NULL)
 		goto out;
-	atomic_dec(&xprt->num_reqs);
+	--xprt->num_reqs;
 	req = ERR_PTR(-ENOMEM);
 out:
 	return req;
@@ -1126,7 +1124,8 @@ out:
 
 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
+	if (xprt->num_reqs > xprt->min_reqs) {
+		--xprt->num_reqs;
 		kfree(req);
 		return true;
 	}
@@ -1162,6 +1161,8 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 	spin_unlock(&xprt->reserve_lock);
 	return;
 out_init_req:
+	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
+				     xprt->num_reqs);
 	task->tk_status = 0;
 	task->tk_rqstp = req;
 	xprt_request_init(task, xprt);
@@ -1229,7 +1230,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
 	else
 		xprt->max_reqs = num_prealloc;
 	xprt->min_reqs = num_prealloc;
-	atomic_set(&xprt->num_reqs, num_prealloc);
+	xprt->num_reqs = num_prealloc;
 
 	return xprt;
 
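For the unlock/alloc/relock sequence in xprt_dynamic_alloc_slot() above, here is a similar userspace sketch of the reservation pattern: bump the plain counter while the lock is held, drop the lock around the blocking allocation, then re-take it and roll the counter back if the allocation failed. Again the names are invented; a pthread mutex replaces the spinlock and calloc() replaces kzalloc(GFP_NOFS).

    /*
     * Userspace sketch (invented names) of the reserve/unlock/allocate/relock
     * pattern used by the dynamic slot allocator after this patch.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct table {
        pthread_mutex_t lock;   /* stands in for xprt->reserve_lock */
        unsigned int num_reqs;  /* only touched with the lock held */
        unsigned int max_reqs;  /* hard limit on dynamically added slots */
    };

    struct rqst { char payload[64]; };

    /* Caller holds tbl->lock on entry and still holds it on return. */
    static struct rqst *dynamic_alloc_slot(struct table *tbl)
    {
        struct rqst *req;

        if (tbl->num_reqs >= tbl->max_reqs)
            return NULL;                    /* table is already full */
        ++tbl->num_reqs;                    /* reserve the slot up front */

        pthread_mutex_unlock(&tbl->lock);   /* don't allocate under the lock */
        req = calloc(1, sizeof(*req));
        pthread_mutex_lock(&tbl->lock);

        if (req == NULL)
            --tbl->num_reqs;                /* allocation failed: undo the reservation */
        return req;
    }

    int main(void)
    {
        struct table tbl = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .max_reqs = 1,
        };
        struct rqst *a, *b;

        pthread_mutex_lock(&tbl.lock);
        a = dynamic_alloc_slot(&tbl);   /* succeeds: counter goes 0 -> 1 */
        b = dynamic_alloc_slot(&tbl);   /* NULL: table already at max_reqs */
        pthread_mutex_unlock(&tbl.lock);

        printf("first=%p second=%p num_reqs=%u\n", (void *)a, (void *)b, tbl.num_reqs);
        free(a);
        return 0;
    }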