SUNRPC: Convert the xprt->sending queue back to an ordinary wait queue
We no longer need priority semantics on the xprt->sending queue, because the order in which tasks are sent is now dictated by their position in the send queue. Note that the backlog queue remains a priority queue, meaning that slot resources are still managed in order of task priority.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
commit 79c99152a3
parent f42f7c2830
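In short, the out_sleep path in xprt_reserve_xprt() and xprt_reserve_xprt_cong() stops computing a queue priority before putting the task to sleep, and xprt_init() now sets up xprt->sending as an ordinary wait queue. A minimal before/after sketch of that sleep path, drawn from the hunks below (illustration only, not the complete functions):

    /* Before: choose a priority level, then wait on the priority queue. */
    if (req == NULL)
            priority = RPC_PRIORITY_LOW;
    else if (!req->rq_ntrans)
            priority = RPC_PRIORITY_NORMAL;
    else
            priority = RPC_PRIORITY_HIGH;
    rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);

    /* After: the order in which tasks are sent is dictated by their
     * position in the send queue, so an ordinary FIFO wait is enough.
     */
    rpc_sleep_on(&xprt->sending, task, NULL);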
net/sunrpc/xprt.c

@@ -192,7 +192,6 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
-	int priority;
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
@@ -212,13 +211,7 @@ out_sleep:
 			task->tk_pid, xprt);
 	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
 	task->tk_status = -EAGAIN;
-	if (req == NULL)
-		priority = RPC_PRIORITY_LOW;
-	else if (!req->rq_ntrans)
-		priority = RPC_PRIORITY_NORMAL;
-	else
-		priority = RPC_PRIORITY_HIGH;
-	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
+	rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -260,7 +253,6 @@ xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
-	int priority;
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
@@ -283,13 +275,7 @@ out_sleep:
 	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
 	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
 	task->tk_status = -EAGAIN;
-	if (req == NULL)
-		priority = RPC_PRIORITY_LOW;
-	else if (!req->rq_ntrans)
-		priority = RPC_PRIORITY_NORMAL;
-	else
-		priority = RPC_PRIORITY_HIGH;
-	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
+	rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -1796,7 +1782,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
 
 	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
-	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
+	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
 
 	xprt_init_xid(xprt);