2019-05-19 20:08:55 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
|
|
|
|
|
2005-08-12 04:25:23 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/types.h>
|
2005-08-12 04:25:23 +08:00
|
|
|
#include <linux/interrupt.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/workqueue.h>
|
2006-05-25 13:40:51 +08:00
|
|
|
#include <linux/net.h>
|
2010-05-08 01:34:47 +08:00
|
|
|
#include <linux/ktime.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-08-12 04:25:23 +08:00
|
|
|
#include <linux/sunrpc/clnt.h>
|
2006-03-21 02:44:22 +08:00
|
|
|
#include <linux/sunrpc/metrics.h>
|
2010-03-20 03:36:22 +08:00
|
|
|
#include <linux/sunrpc/bc_xprt.h>
|
2015-02-15 06:48:49 +08:00
|
|
|
#include <linux/rcupdate.h>
|
2019-02-18 23:02:29 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-10-29 02:24:13 +08:00
|
|
|
#include <trace/events/sunrpc.h>
|
|
|
|
|
2009-04-01 21:23:03 +08:00
|
|
|
#include "sunrpc.h"
|
2021-06-09 03:59:19 +08:00
|
|
|
#include "sysfs.h"
|
2009-04-01 21:23:03 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Local variables
|
|
|
|
*/
|
|
|
|
|
2014-11-18 05:58:04 +08:00
|
|
|
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
|
2005-04-17 06:20:36 +08:00
|
|
|
# define RPCDBG_FACILITY RPCDBG_XPRT
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local functions
|
|
|
|
*/
|
2011-07-18 04:57:32 +08:00
|
|
|
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
|
2018-05-05 03:34:53 +08:00
|
|
|
static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
|
2012-03-02 06:01:05 +08:00
|
|
|
static void xprt_destroy(struct rpc_xprt *xprt);
|
2021-05-17 07:59:10 +08:00
|
|
|
static void xprt_request_init(struct rpc_task *task);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-11-22 19:40:22 +08:00
|
|
|
static DEFINE_SPINLOCK(xprt_list_lock);
|
2007-09-11 01:46:00 +08:00
|
|
|
static LIST_HEAD(xprt_list);
|
|
|
|
|
2019-04-08 01:58:53 +08:00
|
|
|
static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
unsigned long timeout = jiffies + req->rq_timeout;
|
|
|
|
|
|
|
|
if (time_before(timeout, req->rq_majortimeo))
|
|
|
|
return timeout;
|
|
|
|
return req->rq_majortimeo;
|
|
|
|
}
|
|
|
|
|
2007-09-11 01:46:00 +08:00
|
|
|
/**
|
|
|
|
* xprt_register_transport - register a transport implementation
|
|
|
|
* @transport: transport to register
|
|
|
|
*
|
|
|
|
* If a transport implementation is loaded as a kernel module, it can
|
|
|
|
* call this interface to make itself known to the RPC client.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0: transport successfully registered
|
|
|
|
* -EEXIST: transport already registered
|
|
|
|
* -EINVAL: transport module being unloaded
|
|
|
|
*/
|
|
|
|
int xprt_register_transport(struct xprt_class *transport)
|
|
|
|
{
|
|
|
|
struct xprt_class *t;
|
|
|
|
int result;
|
|
|
|
|
|
|
|
result = -EEXIST;
|
|
|
|
spin_lock(&xprt_list_lock);
|
|
|
|
list_for_each_entry(t, &xprt_list, list) {
|
|
|
|
/* don't register the same transport class twice */
|
2007-09-11 01:47:57 +08:00
|
|
|
if (t->ident == transport->ident)
|
2007-09-11 01:46:00 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2008-07-31 13:53:56 +08:00
|
|
|
list_add_tail(&transport->list, &xprt_list);
|
|
|
|
printk(KERN_INFO "RPC: Registered %s transport module.\n",
|
|
|
|
transport->name);
|
|
|
|
result = 0;
|
2007-09-11 01:46:00 +08:00
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock(&xprt_list_lock);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_register_transport);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xprt_unregister_transport - unregister a transport implementation
|
2008-02-14 07:03:23 +08:00
|
|
|
* @transport: transport to unregister
|
2007-09-11 01:46:00 +08:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0: transport successfully unregistered
|
|
|
|
* -ENOENT: transport never registered
|
|
|
|
*/
|
|
|
|
int xprt_unregister_transport(struct xprt_class *transport)
|
|
|
|
{
|
|
|
|
struct xprt_class *t;
|
|
|
|
int result;
|
|
|
|
|
|
|
|
result = 0;
|
|
|
|
spin_lock(&xprt_list_lock);
|
|
|
|
list_for_each_entry(t, &xprt_list, list) {
|
|
|
|
if (t == transport) {
|
|
|
|
printk(KERN_INFO
|
|
|
|
"RPC: Unregistered %s transport module.\n",
|
|
|
|
transport->name);
|
|
|
|
list_del_init(&transport->list);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
result = -ENOENT;
|
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock(&xprt_list_lock);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
|
|
|
|
|
2020-11-07 05:33:38 +08:00
|
|
|
static void
|
|
|
|
xprt_class_release(const struct xprt_class *t)
|
|
|
|
{
|
|
|
|
module_put(t->owner);
|
|
|
|
}
|
|
|
|
|
2020-11-11 01:58:22 +08:00
|
|
|
static const struct xprt_class *
|
|
|
|
xprt_class_find_by_ident_locked(int ident)
|
|
|
|
{
|
|
|
|
const struct xprt_class *t;
|
|
|
|
|
|
|
|
list_for_each_entry(t, &xprt_list, list) {
|
|
|
|
if (t->ident != ident)
|
|
|
|
continue;
|
|
|
|
if (!try_module_get(t->owner))
|
|
|
|
continue;
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct xprt_class *
|
|
|
|
xprt_class_find_by_ident(int ident)
|
|
|
|
{
|
|
|
|
const struct xprt_class *t;
|
|
|
|
|
|
|
|
spin_lock(&xprt_list_lock);
|
|
|
|
t = xprt_class_find_by_ident_locked(ident);
|
|
|
|
spin_unlock(&xprt_list_lock);
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2020-11-07 05:33:38 +08:00
|
|
|
static const struct xprt_class *
|
|
|
|
xprt_class_find_by_netid_locked(const char *netid)
|
|
|
|
{
|
|
|
|
const struct xprt_class *t;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
list_for_each_entry(t, &xprt_list, list) {
|
|
|
|
for (i = 0; t->netid[i][0] != '\0'; i++) {
|
|
|
|
if (strcmp(t->netid[i], netid) != 0)
|
|
|
|
continue;
|
|
|
|
if (!try_module_get(t->owner))
|
|
|
|
continue;
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct xprt_class *
|
|
|
|
xprt_class_find_by_netid(const char *netid)
|
|
|
|
{
|
|
|
|
const struct xprt_class *t;
|
|
|
|
|
|
|
|
spin_lock(&xprt_list_lock);
|
|
|
|
t = xprt_class_find_by_netid_locked(netid);
|
|
|
|
if (!t) {
|
|
|
|
spin_unlock(&xprt_list_lock);
|
|
|
|
request_module("rpc%s", netid);
|
|
|
|
spin_lock(&xprt_list_lock);
|
|
|
|
t = xprt_class_find_by_netid_locked(netid);
|
|
|
|
}
|
|
|
|
spin_unlock(&xprt_list_lock);
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2009-03-12 02:37:56 +08:00
|
|
|
/**
|
2020-11-10 22:41:21 +08:00
|
|
|
* xprt_find_transport_ident - convert a netid into a transport identifier
|
2020-11-07 05:33:38 +08:00
|
|
|
* @netid: transport to load
|
2009-03-12 02:37:56 +08:00
|
|
|
*
|
|
|
|
* Returns:
|
2020-11-10 22:41:21 +08:00
|
|
|
* > 0: transport identifier
|
2009-03-12 02:37:56 +08:00
|
|
|
* -ENOENT: transport module not available
|
|
|
|
*/
|
2020-11-10 22:41:21 +08:00
|
|
|
int xprt_find_transport_ident(const char *netid)
|
2009-03-12 02:37:56 +08:00
|
|
|
{
|
2020-11-07 05:33:38 +08:00
|
|
|
const struct xprt_class *t;
|
2020-11-10 22:41:21 +08:00
|
|
|
int ret;
|
2009-03-12 02:37:56 +08:00
|
|
|
|
2020-11-07 05:33:38 +08:00
|
|
|
t = xprt_class_find_by_netid(netid);
|
|
|
|
if (!t)
|
|
|
|
return -ENOENT;
|
2020-11-10 22:41:21 +08:00
|
|
|
ret = t->ident;
|
2020-11-07 05:33:38 +08:00
|
|
|
xprt_class_release(t);
|
2020-11-10 22:41:21 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
|
|
|
|
|
2018-09-04 11:39:27 +08:00
|
|
|
static void xprt_clear_locked(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
xprt->snd_task = NULL;
|
|
|
|
if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
|
|
|
|
smp_mb__before_atomic();
|
|
|
|
clear_bit(XPRT_LOCKED, &xprt->state);
|
|
|
|
smp_mb__after_atomic();
|
|
|
|
} else
|
|
|
|
queue_work(xprtiod_workqueue, &xprt->task_cleanup);
|
|
|
|
}
|
|
|
|
|
2005-08-26 07:25:51 +08:00
|
|
|
/**
|
|
|
|
* xprt_reserve_xprt - serialize write access to transports
|
|
|
|
* @task: task that is requesting access to the transport
|
2011-07-28 14:54:36 +08:00
|
|
|
* @xprt: pointer to the target transport
|
2005-08-26 07:25:51 +08:00
|
|
|
*
|
|
|
|
* This prevents mixing the payload of separate requests, and prevents
|
|
|
|
* transport connects from colliding with writes. No congestion control
|
|
|
|
* is provided.
|
|
|
|
*/
|
2011-07-18 04:01:03 +08:00
|
|
|
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
|
2005-08-26 07:25:51 +08:00
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
|
|
|
|
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
|
|
|
|
if (task == xprt->snd_task)
|
2019-10-10 00:58:14 +08:00
|
|
|
goto out_locked;
|
2005-08-26 07:25:51 +08:00
|
|
|
goto out_sleep;
|
|
|
|
}
|
2018-09-04 11:39:27 +08:00
|
|
|
if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
|
|
|
|
goto out_unlock;
|
2005-08-26 07:25:51 +08:00
|
|
|
xprt->snd_task = task;
|
2011-03-11 01:40:28 +08:00
|
|
|
|
2019-10-10 00:58:14 +08:00
|
|
|
out_locked:
|
|
|
|
trace_xprt_reserve_xprt(xprt, task);
|
2005-08-26 07:25:51 +08:00
|
|
|
return 1;
|
|
|
|
|
2018-09-04 11:39:27 +08:00
|
|
|
out_unlock:
|
|
|
|
xprt_clear_locked(xprt);
|
2005-08-26 07:25:51 +08:00
|
|
|
out_sleep:
|
|
|
|
task->tk_status = -EAGAIN;
|
2019-04-08 01:58:49 +08:00
|
|
|
if (RPC_IS_SOFT(task))
|
|
|
|
rpc_sleep_on_timeout(&xprt->sending, task, NULL,
|
2019-04-08 01:58:53 +08:00
|
|
|
xprt_request_timeout(req));
|
2019-04-08 01:58:49 +08:00
|
|
|
else
|
|
|
|
rpc_sleep_on(&xprt->sending, task, NULL);
|
2005-08-26 07:25:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
|
2005-08-26 07:25:51 +08:00
|
|
|
|
2018-09-04 05:37:36 +08:00
|
|
|
static bool
|
|
|
|
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
return test_bit(XPRT_CWND_WAIT, &xprt->state);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (!list_empty(&xprt->xmit_queue)) {
|
|
|
|
/* Peek at head of queue to see if it can make progress */
|
|
|
|
if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
|
|
|
|
rq_xmit)->rq_cong)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
set_bit(XPRT_CWND_WAIT, &xprt->state);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (!RPCXPRT_CONGESTED(xprt))
|
|
|
|
clear_bit(XPRT_CWND_WAIT, &xprt->state);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2005-08-26 07:25:51 +08:00
|
|
|
* xprt_reserve_xprt_cong - serialize write access to transports
|
|
|
|
* @task: task that is requesting access to the transport
|
|
|
|
*
|
|
|
|
* Same as xprt_reserve_xprt, but Van Jacobson congestion control is
|
|
|
|
* integrated into the decision of whether a request is allowed to be
|
|
|
|
* woken up and given access to the transport.
|
2018-09-04 05:37:36 +08:00
|
|
|
* Note that the lock is only granted if we know there are free slots.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2011-07-18 04:01:03 +08:00
|
|
|
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
|
2005-08-12 04:25:38 +08:00
|
|
|
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
if (task == xprt->snd_task)
|
2019-10-10 00:58:14 +08:00
|
|
|
goto out_locked;
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out_sleep;
|
|
|
|
}
|
2011-07-18 04:01:03 +08:00
|
|
|
if (req == NULL) {
|
|
|
|
xprt->snd_task = task;
|
2019-10-10 00:58:14 +08:00
|
|
|
goto out_locked;
|
2011-07-18 04:01:03 +08:00
|
|
|
}
|
2018-09-04 11:39:27 +08:00
|
|
|
if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
|
|
|
|
goto out_unlock;
|
2018-09-04 05:37:36 +08:00
|
|
|
if (!xprt_need_congestion_window_wait(xprt)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
xprt->snd_task = task;
|
2019-10-10 00:58:14 +08:00
|
|
|
goto out_locked;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2018-09-04 11:39:27 +08:00
|
|
|
out_unlock:
|
2006-01-03 16:55:55 +08:00
|
|
|
xprt_clear_locked(xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
out_sleep:
|
|
|
|
task->tk_status = -EAGAIN;
|
2019-04-08 01:58:49 +08:00
|
|
|
if (RPC_IS_SOFT(task))
|
|
|
|
rpc_sleep_on_timeout(&xprt->sending, task, NULL,
|
2019-04-08 01:58:53 +08:00
|
|
|
xprt_request_timeout(req));
|
2019-04-08 01:58:49 +08:00
|
|
|
else
|
|
|
|
rpc_sleep_on(&xprt->sending, task, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
2019-10-10 00:58:14 +08:00
|
|
|
out_locked:
|
|
|
|
trace_xprt_reserve_cong(xprt, task);
|
|
|
|
return 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-08-26 07:25:51 +08:00
|
|
|
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int retval;
|
|
|
|
|
2018-09-08 07:38:55 +08:00
|
|
|
if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
|
|
|
|
return 1;
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2011-07-18 04:01:03 +08:00
|
|
|
retval = xprt->ops->reserve_xprt(xprt, task);
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2012-01-18 11:57:37 +08:00
|
|
|
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
|
2005-08-26 07:25:51 +08:00
|
|
|
{
|
2012-01-18 11:57:37 +08:00
|
|
|
struct rpc_xprt *xprt = data;
|
2005-08-26 07:25:51 +08:00
|
|
|
|
|
|
|
xprt->snd_task = task;
|
2012-01-18 11:57:37 +08:00
|
|
|
return true;
|
|
|
|
}
|
2005-08-26 07:25:51 +08:00
|
|
|
|
2012-01-18 11:57:37 +08:00
|
|
|
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
|
|
|
|
return;
|
2018-09-04 11:39:27 +08:00
|
|
|
if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
|
|
|
|
goto out_unlock;
|
2016-05-28 00:59:33 +08:00
|
|
|
if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
|
|
|
|
__xprt_lock_write_func, xprt))
|
2012-01-18 11:57:37 +08:00
|
|
|
return;
|
2018-09-04 11:39:27 +08:00
|
|
|
out_unlock:
|
2006-01-03 16:55:55 +08:00
|
|
|
xprt_clear_locked(xprt);
|
2005-08-26 07:25:51 +08:00
|
|
|
}
|
|
|
|
|
2012-01-18 11:57:37 +08:00
|
|
|
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
|
|
|
|
return;
|
2018-09-04 11:39:27 +08:00
|
|
|
if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
|
|
|
|
goto out_unlock;
|
2018-09-04 05:37:36 +08:00
|
|
|
if (xprt_need_congestion_window_wait(xprt))
|
2012-01-18 11:57:37 +08:00
|
|
|
goto out_unlock;
|
2016-05-28 00:59:33 +08:00
|
|
|
if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
|
2018-09-04 05:37:36 +08:00
|
|
|
__xprt_lock_write_func, xprt))
|
2012-01-18 11:57:37 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
out_unlock:
|
2006-01-03 16:55:55 +08:00
|
|
|
xprt_clear_locked(xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-08-26 07:25:51 +08:00
|
|
|
/**
|
|
|
|
* xprt_release_xprt - allow other requests to use a transport
|
|
|
|
* @xprt: transport with other tasks potentially waiting
|
|
|
|
* @task: task that is releasing access to the transport
|
|
|
|
*
|
|
|
|
* Note that "task" can be NULL. No congestion control is provided.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2005-08-26 07:25:51 +08:00
|
|
|
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if (xprt->snd_task == task) {
|
2006-01-03 16:55:55 +08:00
|
|
|
xprt_clear_locked(xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
__xprt_lock_write_next(xprt);
|
|
|
|
}
|
2019-10-10 00:58:14 +08:00
|
|
|
trace_xprt_release_xprt(xprt, task);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_release_xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-08-26 07:25:51 +08:00
|
|
|
/**
|
|
|
|
* xprt_release_xprt_cong - allow other requests to use a transport
|
|
|
|
* @xprt: transport with other tasks potentially waiting
|
|
|
|
* @task: task that is releasing access to the transport
|
|
|
|
*
|
|
|
|
* Note that "task" can be NULL. Another task is awoken to use the
|
|
|
|
* transport if the transport's congestion window allows it.
|
|
|
|
*/
|
|
|
|
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
|
|
|
|
{
|
|
|
|
if (xprt->snd_task == task) {
|
2006-01-03 16:55:55 +08:00
|
|
|
xprt_clear_locked(xprt);
|
2005-08-26 07:25:51 +08:00
|
|
|
__xprt_lock_write_next_cong(xprt);
|
|
|
|
}
|
2019-10-10 00:58:14 +08:00
|
|
|
trace_xprt_release_cong(xprt, task);
|
2005-08-26 07:25:51 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
|
2005-08-26 07:25:51 +08:00
|
|
|
|
2021-06-09 03:59:19 +08:00
|
|
|
void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-09-08 07:38:55 +08:00
|
|
|
if (xprt->snd_task != task)
|
|
|
|
return;
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2005-08-26 07:25:51 +08:00
|
|
|
xprt->ops->release_xprt(xprt, task);
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Van Jacobson congestion avoidance. Check if the congestion window
|
|
|
|
* overflowed. Put the task to sleep if this is the case.
|
|
|
|
*/
|
|
|
|
static int
|
2018-09-04 05:37:36 +08:00
|
|
|
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if (req->rq_cong)
|
|
|
|
return 1;
|
2019-10-10 00:58:14 +08:00
|
|
|
trace_xprt_get_cong(xprt, req->rq_task);
|
2018-09-04 05:37:36 +08:00
|
|
|
if (RPCXPRT_CONGESTED(xprt)) {
|
|
|
|
xprt_set_congestion_window_wait(xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
2018-09-04 05:37:36 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
req->rq_cong = 1;
|
|
|
|
xprt->cong += RPC_CWNDSCALE;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Adjust the congestion window, and wake up the next task
|
|
|
|
* that has been sleeping due to congestion
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
if (!req->rq_cong)
|
|
|
|
return;
|
|
|
|
req->rq_cong = 0;
|
|
|
|
xprt->cong -= RPC_CWNDSCALE;
|
2018-09-04 05:37:36 +08:00
|
|
|
xprt_test_and_clear_congestion_window_wait(xprt);
|
2019-10-10 00:58:14 +08:00
|
|
|
trace_xprt_put_cong(xprt, req->rq_task);
|
2005-08-26 07:25:51 +08:00
|
|
|
__xprt_lock_write_next_cong(xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2018-09-04 05:37:36 +08:00
|
|
|
/**
|
|
|
|
* xprt_request_get_cong - Request congestion control credits
|
|
|
|
* @xprt: pointer to transport
|
|
|
|
* @req: pointer to RPC request
|
|
|
|
*
|
|
|
|
* Useful for transports that require congestion control.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
if (req->rq_cong)
|
|
|
|
return true;
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2018-09-04 05:37:36 +08:00
|
|
|
ret = __xprt_get_cong(xprt, req) != 0;
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2018-09-04 05:37:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_request_get_cong);
|
|
|
|
|
2005-08-26 07:25:53 +08:00
|
|
|
/**
|
|
|
|
* xprt_release_rqst_cong - housekeeping when request is complete
|
|
|
|
* @task: RPC request that recently completed
|
|
|
|
*
|
|
|
|
* Useful for transports that require congestion control.
|
|
|
|
*/
|
|
|
|
void xprt_release_rqst_cong(struct rpc_task *task)
|
|
|
|
{
|
2013-01-08 22:10:21 +08:00
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
|
|
|
|
__xprt_put_cong(req->rq_xprt, req);
|
2005-08-26 07:25:53 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
|
2005-08-26 07:25:53 +08:00
|
|
|
|
2019-09-14 04:01:07 +08:00
|
|
|
static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
|
|
|
|
__xprt_lock_write_next_cong(xprt);
|
|
|
|
}
|
|
|
|
|
2018-09-04 05:37:36 +08:00
|
|
|
/*
|
|
|
|
* Clear the congestion window wait flag and wake up the next
|
|
|
|
* entry on xprt->sending
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2018-09-04 05:37:36 +08:00
|
|
|
__xprt_lock_write_next_cong(xprt);
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2018-09-04 05:37:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-08-26 07:25:52 +08:00
|
|
|
/**
|
|
|
|
* xprt_adjust_cwnd - adjust transport congestion window
|
2013-01-08 22:48:15 +08:00
|
|
|
* @xprt: pointer to xprt
|
2005-08-26 07:25:52 +08:00
|
|
|
* @task: recently completed RPC request used to adjust window
|
|
|
|
* @result: result code of completed RPC request
|
|
|
|
*
|
2014-05-28 22:34:49 +08:00
|
|
|
* The transport code maintains an estimate on the maximum number of out-
|
|
|
|
* standing RPC requests, using a smoothed version of the congestion
|
|
|
|
* avoidance implemented in 44BSD. This is basically the Van Jacobson
|
|
|
|
* congestion algorithm: If a retransmit occurs, the congestion window is
|
|
|
|
* halved; otherwise, it is incremented by 1/cwnd when
|
|
|
|
*
|
|
|
|
* - a reply is received and
|
|
|
|
* - a full number of requests are outstanding and
|
|
|
|
* - the congestion window hasn't been updated recently.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-01-08 22:48:15 +08:00
|
|
|
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-08-26 07:25:52 +08:00
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
unsigned long cwnd = xprt->cwnd;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (result >= 0 && cwnd <= xprt->cong) {
|
|
|
|
/* The (cwnd >> 1) term makes sure
|
|
|
|
* the result gets rounded properly. */
|
|
|
|
cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
|
|
|
|
if (cwnd > RPC_MAXCWND(xprt))
|
|
|
|
cwnd = RPC_MAXCWND(xprt);
|
2005-08-26 07:25:51 +08:00
|
|
|
__xprt_lock_write_next_cong(xprt);
|
2005-04-17 06:20:36 +08:00
|
|
|
} else if (result == -ETIMEDOUT) {
|
|
|
|
cwnd >>= 1;
|
|
|
|
if (cwnd < RPC_CWNDSCALE)
|
|
|
|
cwnd = RPC_CWNDSCALE;
|
|
|
|
}
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
|
2005-04-17 06:20:36 +08:00
|
|
|
xprt->cong, xprt->cwnd, cwnd);
|
|
|
|
xprt->cwnd = cwnd;
|
2005-08-26 07:25:52 +08:00
|
|
|
__xprt_put_cong(xprt, req);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-08-12 04:25:44 +08:00
|
|
|
/**
|
|
|
|
* xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
|
|
|
|
* @xprt: transport with waiting tasks
|
|
|
|
* @status: result code to plant in each task before waking it
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
|
|
|
|
{
|
|
|
|
if (status < 0)
|
|
|
|
rpc_wake_up_status(&xprt->pending, status);
|
|
|
|
else
|
|
|
|
rpc_wake_up(&xprt->pending);
|
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
|
2005-08-12 04:25:44 +08:00
|
|
|
|
2005-08-12 04:25:50 +08:00
|
|
|
/**
|
|
|
|
* xprt_wait_for_buffer_space - wait for transport output buffer to clear
|
2018-09-04 11:39:27 +08:00
|
|
|
* @xprt: transport
|
2013-02-23 03:57:57 +08:00
|
|
|
*
|
|
|
|
* Note that we only set the timer for the case of RPC_IS_SOFT(), since
|
|
|
|
* we don't in general want to force a socket disconnection due to
|
|
|
|
* an incomplete RPC call transmission.
|
2005-08-12 04:25:50 +08:00
|
|
|
*/
|
2018-09-04 11:39:27 +08:00
|
|
|
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
|
2005-08-12 04:25:50 +08:00
|
|
|
{
|
2018-09-04 11:39:27 +08:00
|
|
|
set_bit(XPRT_WRITE_SPACE, &xprt->state);
|
2005-08-12 04:25:50 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
|
2005-08-12 04:25:50 +08:00
|
|
|
|
2018-09-04 11:39:27 +08:00
|
|
|
static bool
|
|
|
|
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
|
|
|
|
__xprt_lock_write_next(xprt);
|
|
|
|
dprintk("RPC: write space: waking waiting task on "
|
|
|
|
"xprt %p\n", xprt);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:50 +08:00
|
|
|
/**
|
|
|
|
* xprt_write_space - wake the task waiting for transport output buffer space
|
|
|
|
* @xprt: transport with waiting tasks
|
|
|
|
*
|
|
|
|
* Can be called in a soft IRQ context, so xprt_write_space never sleeps.
|
|
|
|
*/
|
2018-09-04 11:39:27 +08:00
|
|
|
bool xprt_write_space(struct rpc_xprt *xprt)
|
2005-08-12 04:25:50 +08:00
|
|
|
{
|
2018-09-04 11:39:27 +08:00
|
|
|
bool ret;
|
|
|
|
|
|
|
|
if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
|
|
|
|
return false;
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2018-09-04 11:39:27 +08:00
|
|
|
ret = xprt_clear_write_space_locked(xprt);
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2018-09-04 11:39:27 +08:00
|
|
|
return ret;
|
2005-08-12 04:25:50 +08:00
|
|
|
}
|
2007-09-11 01:45:36 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_write_space);
|
2005-08-12 04:25:50 +08:00
|
|
|
|
2019-04-08 01:58:56 +08:00
|
|
|
static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
|
|
|
|
{
|
|
|
|
s64 delta = ktime_to_ns(ktime_get() - abstime);
|
|
|
|
return likely(delta >= 0) ?
|
|
|
|
jiffies - nsecs_to_jiffies(delta) :
|
|
|
|
jiffies + nsecs_to_jiffies(-delta);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2007-12-21 05:03:55 +08:00
|
|
|
const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
|
2019-04-08 01:58:56 +08:00
|
|
|
unsigned long majortimeo = req->rq_timeout;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (to->to_exponential)
|
2019-04-08 01:58:56 +08:00
|
|
|
majortimeo <<= to->to_retries;
|
|
|
|
else
|
|
|
|
majortimeo += to->to_increment * to->to_retries;
|
|
|
|
if (majortimeo > to->to_maxval || majortimeo == 0)
|
|
|
|
majortimeo = to->to_maxval;
|
|
|
|
return majortimeo;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void xprt_reset_majortimeo(struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
req->rq_majortimeo += xprt_calc_majortimeo(req);
|
|
|
|
}
|
|
|
|
|
2020-07-16 01:17:52 +08:00
|
|
|
static void xprt_reset_minortimeo(struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
req->rq_minortimeo += req->rq_timeout;
|
|
|
|
}
|
|
|
|
|
2019-04-08 01:58:56 +08:00
|
|
|
static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
unsigned long time_init;
|
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
|
|
|
|
if (likely(xprt && xprt_connected(xprt)))
|
|
|
|
time_init = jiffies;
|
2005-04-17 06:20:36 +08:00
|
|
|
else
|
2019-04-08 01:58:56 +08:00
|
|
|
time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
|
|
|
|
req->rq_timeout = task->tk_client->cl_timeout->to_initval;
|
|
|
|
req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
|
2020-07-16 01:17:52 +08:00
|
|
|
req->rq_minortimeo = time_init + req->rq_timeout;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 * Returns 0 if the request should simply be retransmitted with the
 * (possibly backed-off) timeout, or -ETIMEDOUT once the major timeout
 * has expired.
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		/* Major deadline not reached yet. Only back off the
		 * per-try timeout once the current minor interval has
		 * actually elapsed; otherwise leave everything alone. */
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		/* Major deadline reached: restart the back-off cycle and
		 * report the timeout to the caller. */
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	/* Always rearm the minor deadline against the (new) rq_timeout */
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
|
|
|
|
|
2006-11-22 22:55:48 +08:00
|
|
|
/* Workqueue callback that closes a transport flagged XPRT_CLOSE_WAIT.
 * Runs with the XPRT_LOCKED write lock held (taken by whoever queued
 * the work) and releases it when done. Runs under memalloc_nofs scope
 * to avoid recursing into the filesystem during reclaim. */
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	/* Drop the write lock taken when the close was scheduled */
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 * Clears the connected state, releases write-space and congestion
 * waiters, and wakes all pending tasks with -ENOTCONN, all under the
 * transport_lock.
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-11-06 23:18:36 +08:00
|
|
|
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 * Flags the transport for close and tries to schedule the autoclose
 * worker. If another task already holds XPRT_LOCKED, the current
 * sender is woken with -ENOTCONN so it can notice the disconnect.
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
|
2007-11-06 23:18:36 +08:00
|
|
|
|
2018-08-23 12:03:43 +08:00
|
|
|
static unsigned int
|
|
|
|
xprt_connect_cookie(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
return READ_ONCE(xprt->connect_cookie);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
|
|
|
|
return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
|
|
|
|
!xprt_connected(xprt);
|
|
|
|
}
|
|
|
|
|
2008-04-18 04:52:57 +08:00
|
|
|
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	/* Stale cookie: someone else already broke this connection */
	if (cookie != xprt->connect_cookie)
		goto out;
	/* A close is already in progress */
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	/* -EAGAIN: waiters should retry after the reconnect */
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}
|
|
|
|
|
2016-08-03 01:47:43 +08:00
|
|
|
static bool
|
|
|
|
xprt_has_timer(const struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
return xprt->idle_timeout != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
|
|
|
|
__must_hold(&xprt->transport_lock)
|
|
|
|
{
|
2019-06-27 04:30:24 +08:00
|
|
|
xprt->last_used = jiffies;
|
2018-09-07 20:35:22 +08:00
|
|
|
if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
|
2016-08-03 01:47:43 +08:00
|
|
|
mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Timer callback: the transport has been idle for xprt->idle_timeout.
 * If no replies are still outstanding, grab XPRT_LOCKED and schedule
 * the autoclose worker to tear the connection down. */
static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	/* Still expecting replies: not idle after all */
	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	/* Someone else owns the transport; let them deal with it */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}
|
|
|
|
|
2015-02-09 07:19:25 +08:00
|
|
|
bool xprt_lock_connect(struct rpc_xprt *xprt,
|
|
|
|
struct rpc_task *task,
|
|
|
|
void *cookie)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2015-02-09 07:19:25 +08:00
|
|
|
if (!test_bit(XPRT_LOCKED, &xprt->state))
|
|
|
|
goto out;
|
|
|
|
if (xprt->snd_task != task)
|
|
|
|
goto out;
|
|
|
|
xprt->snd_task = cookie;
|
|
|
|
ret = true;
|
|
|
|
out:
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2015-02-09 07:19:25 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
|
|
|
|
{
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2015-02-09 07:19:25 +08:00
|
|
|
if (xprt->snd_task != cookie)
|
|
|
|
goto out;
|
|
|
|
if (!test_bit(XPRT_LOCKED, &xprt->state))
|
|
|
|
goto out;
|
|
|
|
xprt->snd_task =NULL;
|
|
|
|
xprt->ops->release_xprt(xprt, NULL);
|
2016-08-03 01:47:43 +08:00
|
|
|
xprt_schedule_autodisconnect(xprt);
|
2015-02-09 07:19:25 +08:00
|
|
|
out:
|
2019-05-02 23:21:08 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2015-09-19 03:53:24 +08:00
|
|
|
wake_up_bit(&xprt->state, XPRT_LOCKED);
|
2015-02-09 07:19:25 +08:00
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 * On return, @task is either connected, queued on xprt->pending to be
 * woken when the connect completes, or has tk_status set to an error.
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	/* No remote address bound yet: caller must retry later */
	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	/* A deferred close is pending: finish it before reconnecting */
	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		/* Sleep first so a fast connect completion can't be missed */
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			/* Connection came up while we were preparing */
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}
|
|
|
|
|
2019-06-19 22:33:42 +08:00
|
|
|
/**
|
|
|
|
* xprt_reconnect_delay - compute the wait before scheduling a connect
|
|
|
|
* @xprt: transport instance
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
unsigned long start, now = jiffies;
|
|
|
|
|
|
|
|
start = xprt->stat.connect_start + xprt->reestablish_timeout;
|
|
|
|
if (time_after(start, now))
|
|
|
|
return start - now;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xprt_reconnect_backoff - compute the new re-establish timeout
|
|
|
|
* @xprt: transport instance
|
|
|
|
* @init_to: initial reestablish timeout
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
|
|
|
|
{
|
|
|
|
xprt->reestablish_timeout <<= 1;
|
|
|
|
if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
|
|
|
|
xprt->reestablish_timeout = xprt->max_reconnect_timeout;
|
|
|
|
if (xprt->reestablish_timeout < init_to)
|
|
|
|
xprt->reestablish_timeout = init_to;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
|
|
|
|
|
2018-09-07 20:35:22 +08:00
|
|
|
/* Three-way comparison result used when searching/inserting requests
 * in the transport's receive red-black tree, keyed by XID. */
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,	/* XIDs are identical */
	XID_RB_LEFT,	/* first XID sorts before the second */
	XID_RB_RIGHT,	/* first XID sorts after the second */
};
|
|
|
|
static enum xprt_xid_rb_cmp
|
|
|
|
xprt_xid_cmp(__be32 xid1, __be32 xid2)
|
|
|
|
{
|
|
|
|
if (xid1 == xid2)
|
|
|
|
return XID_RB_EQUAL;
|
|
|
|
if ((__force u32)xid1 < (__force u32)xid2)
|
|
|
|
return XID_RB_LEFT;
|
|
|
|
return XID_RB_RIGHT;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct rpc_rqst *
|
|
|
|
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
|
|
|
|
{
|
|
|
|
struct rb_node *n = xprt->recv_queue.rb_node;
|
|
|
|
struct rpc_rqst *req;
|
|
|
|
|
|
|
|
while (n != NULL) {
|
|
|
|
req = rb_entry(n, struct rpc_rqst, rq_recv);
|
|
|
|
switch (xprt_xid_cmp(xid, req->rq_xid)) {
|
|
|
|
case XID_RB_LEFT:
|
|
|
|
n = n->rb_left;
|
|
|
|
break;
|
|
|
|
case XID_RB_RIGHT:
|
|
|
|
n = n->rb_right;
|
|
|
|
break;
|
|
|
|
case XID_RB_EQUAL:
|
|
|
|
return req;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &xprt->recv_queue.rb_node;
|
|
|
|
struct rb_node *n = NULL;
|
|
|
|
struct rpc_rqst *req;
|
|
|
|
|
|
|
|
while (*p != NULL) {
|
|
|
|
n = *p;
|
|
|
|
req = rb_entry(n, struct rpc_rqst, rq_recv);
|
|
|
|
switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
|
|
|
|
case XID_RB_LEFT:
|
|
|
|
p = &n->rb_left;
|
|
|
|
break;
|
|
|
|
case XID_RB_RIGHT:
|
|
|
|
p = &n->rb_right;
|
|
|
|
break;
|
|
|
|
case XID_RB_EQUAL:
|
|
|
|
WARN_ON_ONCE(new != req);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
rb_link_node(&new->rq_recv, n, p);
|
|
|
|
rb_insert_color(&new->rq_recv, &xprt->recv_queue);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove @req from the transport's receive rb-tree. Callers in this
 * file do so under xprt->queue_lock. */
static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 *
 * On success the request's round-trip time is recorded; on failure the
 * transport's bad_xids counter is bumped and NULL is returned.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		/* RTT measured from the transmit timestamp */
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-08-07 00:55:34 +08:00
|
|
|
static bool
|
|
|
|
xprt_is_pinned_rqst(struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
return atomic_read(&req->rq_pin) != 0;
|
|
|
|
}
|
|
|
|
|
2017-08-13 22:03:59 +08:00
|
|
|
/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);
|
2017-08-13 22:03:59 +08:00
|
|
|
|
|
|
|
/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	/* Fast path: nobody is waiting for the pin count, so no wakeup
	 * is needed when it drops. */
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	/* A task is sleeping in xprt_wait_on_pinned_rqst(); wake it when
	 * the last pin is released. */
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
|
2017-08-13 22:03:59 +08:00
|
|
|
|
|
|
|
/* Block until every receive-side pin on @req has been released. */
static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}
|
|
|
|
|
2018-08-23 05:55:46 +08:00
|
|
|
static bool
|
|
|
|
xprt_request_data_received(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
|
|
|
|
READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
|
|
|
|
READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * xprt_request_enqueue_receive - Add an request to the receive queue
 * @task: RPC task
 *
 * No-op if the request is already queued or a reply has already been
 * received.
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xprt_request_dequeue_receive_locked - Remove a request from the receive queue
|
|
|
|
* @task: RPC task
|
|
|
|
*
|
|
|
|
* Caller must hold xprt->queue_lock.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
xprt_request_dequeue_receive_locked(struct rpc_task *task)
|
|
|
|
{
|
2018-09-07 20:35:22 +08:00
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
|
2018-08-23 05:55:46 +08:00
|
|
|
if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
|
2018-09-07 20:35:22 +08:00
|
|
|
xprt_request_rb_remove(req->rq_xprt, req);
|
2018-08-23 05:55:46 +08:00
|
|
|
}
|
|
|
|
|
2018-03-06 04:12:57 +08:00
|
|
|
/**
|
|
|
|
* xprt_update_rtt - Update RPC RTT statistics
|
|
|
|
* @task: RPC request that recently completed
|
|
|
|
*
|
2018-08-31 22:21:00 +08:00
|
|
|
* Caller holds xprt->queue_lock.
|
2018-03-06 04:12:57 +08:00
|
|
|
*/
|
|
|
|
void xprt_update_rtt(struct rpc_task *task)
|
2005-08-26 07:25:52 +08:00
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct rpc_rtt *rtt = task->tk_client->cl_rtt;
|
2012-04-15 13:58:06 +08:00
|
|
|
unsigned int timer = task->tk_msg.rpc_proc->p_timer;
|
2010-05-14 00:51:49 +08:00
|
|
|
long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
|
2005-08-26 07:25:52 +08:00
|
|
|
|
|
|
|
if (timer) {
|
|
|
|
if (req->rq_ntrans == 1)
|
2010-05-08 01:34:47 +08:00
|
|
|
rpc_update_rtt(rtt, timer, m);
|
2005-08-26 07:25:52 +08:00
|
|
|
rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
|
|
|
|
}
|
|
|
|
}
|
2018-03-06 04:12:57 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_update_rtt);
|
2005-08-26 07:25:52 +08:00
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-08-26 07:25:52 +08:00
|
|
|
static void xprt_timer(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-08-26 07:25:52 +08:00
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
|
2008-02-23 05:34:17 +08:00
|
|
|
if (task->tk_status != -ETIMEDOUT)
|
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-01-04 04:38:25 +08:00
|
|
|
trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
|
2009-04-01 21:23:28 +08:00
|
|
|
if (!req->rq_reply_bytes_recvd) {
|
2005-08-26 07:25:52 +08:00
|
|
|
if (xprt->ops->timer)
|
2013-01-08 22:48:15 +08:00
|
|
|
xprt->ops->timer(xprt, task);
|
2008-02-23 05:34:17 +08:00
|
|
|
} else
|
|
|
|
task->tk_status = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2019-04-08 01:58:46 +08:00
|
|
|
/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	/* xprt_timer runs if the timeout expires before the reply lands */
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
|
|
|
|
* @task: pointer to rpc_task
|
|
|
|
*
|
|
|
|
* Set a request's retransmit timeout using the RTT estimator,
|
|
|
|
* and put the task to sleep on the pending queue.
|
|
|
|
*/
|
|
|
|
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
int timer = task->tk_msg.rpc_proc->p_timer;
|
|
|
|
struct rpc_clnt *clnt = task->tk_client;
|
|
|
|
struct rpc_rtt *rtt = clnt->cl_rtt;
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
unsigned long max_timeout = clnt->cl_timeout->to_maxval;
|
2019-04-08 01:58:49 +08:00
|
|
|
unsigned long timeout;
|
2019-04-08 01:58:46 +08:00
|
|
|
|
2019-04-08 01:58:49 +08:00
|
|
|
timeout = rpc_calc_rto(rtt, timer);
|
|
|
|
timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
|
|
|
|
if (timeout > max_timeout || timeout == 0)
|
|
|
|
timeout = max_timeout;
|
|
|
|
rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
|
|
|
|
jiffies + timeout);
|
2019-04-08 01:58:46 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
|
|
|
|
|
2018-08-23 12:03:43 +08:00
|
|
|
/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 * Puts the task to sleep on xprt->pending until a reply arrives, if
 * one is still expected.
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	/* Re-check under the lock: the reply may have just arrived */
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}
|
|
|
|
|
2018-08-10 11:33:21 +08:00
|
|
|
/* A request joins the transmit queue only if it is not already on it
 * (i.e. RPC_TASK_NEED_XMIT is clear). */
static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}
|
|
|
|
|
|
|
|
/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue. Queue position depends on the
 * request's class: congestion-controlled requests go ahead of the
 * first non-congestion entry, swapper requests ahead of the first
 * unsent non-swapper entry, and requests without a GSS sequence number
 * may piggy-back on another request from the same task owner via
 * rq_xmit2.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			/* Swap-out writes jump ahead of ordinary unsent
			 * requests, but not of other swapper requests. */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			/* Group with an already-queued request from the
			 * same owner via the rq_xmit2 side list. */
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		/* Default: append to the tail of the transmit queue */
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}
|
|
|
|
|
|
|
|
/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		/* On the main queue: if this request headed a group of
		 * same-owner requests (rq_xmit2), promote the next group
		 * member onto the main queue in its place. */
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		/* Only on a group's side list */
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xprt_request_dequeue_transmit - remove a task from the transmission queue
|
|
|
|
* @task: pointer to rpc_task
|
|
|
|
*
|
|
|
|
* Remove a task from the transmission queue
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
xprt_request_dequeue_transmit(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
|
|
|
|
spin_lock(&xprt->queue_lock);
|
|
|
|
xprt_request_dequeue_transmit_locked(task);
|
|
|
|
spin_unlock(&xprt->queue_lock);
|
|
|
|
}
|
|
|
|
|
2019-09-11 01:01:35 +08:00
|
|
|
/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	/* Cheap unlocked pre-check: only take queue_lock if the request
	 * may still be queued or pinned by the receive path. */
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		/* The receive worker may still hold a pin on req; drop
		 * queue_lock while sleeping for the pin to be released,
		 * then re-check under the lock. */
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}
|
|
|
|
|
2018-09-14 00:22:04 +08:00
|
|
|
/**
|
|
|
|
* xprt_request_prepare - prepare an encoded request for transport
|
|
|
|
* @req: pointer to rpc_rqst
|
|
|
|
*
|
|
|
|
* Calls into the transport layer to do whatever is needed to prepare
|
|
|
|
* the request for transmission or receive.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
xprt_request_prepare(struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
|
|
|
|
if (xprt->ops->prepare_request)
|
|
|
|
xprt->ops->prepare_request(req);
|
|
|
|
}
|
|
|
|
|
2018-08-25 04:28:28 +08:00
|
|
|
/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	/* Thin public wrapper around the disconnect-retransmit check. */
	return xprt_request_retransmit_after_disconnect(task);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
|
|
|
* xprt_prepare_transmit - reserve the transport before sending a request
|
|
|
|
* @task: RPC task about to send a request
|
|
|
|
*
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-09-26 00:17:18 +08:00
|
|
|
bool xprt_prepare_transmit(struct rpc_task *task)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
|
2018-09-02 02:25:24 +08:00
|
|
|
if (!xprt_lock_write(xprt, task)) {
|
|
|
|
/* Race breaker: someone may have transmitted us */
|
2018-08-10 11:33:21 +08:00
|
|
|
if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
|
2018-09-02 02:25:24 +08:00
|
|
|
rpc_wake_up_queued_task_set_status(&xprt->sending,
|
|
|
|
task, 0);
|
|
|
|
return false;
|
|
|
|
|
2013-09-26 00:17:18 +08:00
|
|
|
}
|
2018-09-02 02:25:24 +08:00
|
|
|
return true;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-07-28 05:22:50 +08:00
|
|
|
void xprt_end_transmit(struct rpc_task *task)
|
2005-10-19 05:20:11 +08:00
|
|
|
{
|
2021-04-01 01:22:14 +08:00
|
|
|
struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
|
|
|
|
|
|
|
|
xprt_inject_disconnect(xprt);
|
|
|
|
xprt_release_write(xprt, task);
|
2005-10-19 05:20:11 +08:00
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	/* Pre-transmission checks only apply before the first byte has
	 * gone out; a partially sent request must be pushed to completion. */
	if (!req->rq_bytes_sent) {
		/* A reply already arrived (e.g. for a resent request):
		 * nothing to transmit, just dequeue. */
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	/* Snapshot the cookie before sending so a reconnect during the
	 * send is detectable via req->rq_connect_cookie below. */
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		/* Send failed: undo the ntrans bump and leave the request
		 * queued so the caller can decide how to proceed. */
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	/* transport_lock protects the transport statistics counters. */
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	/* Remove from the transmit queue and wake the owning task with
	 * the final transmission status. */
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}
|
|
|
|
|
|
|
|
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int counter, status;

	spin_lock(&xprt->queue_lock);
	counter = 0;
	while (!list_empty(&xprt->xmit_queue)) {
		/* Cap the number of requests sent in one call so a single
		 * task does not monopolise the transport indefinitely. */
		if (++counter == 20)
			break;
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		/* Pin next so it stays valid while queue_lock is dropped
		 * for the actual transmission. */
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		/* A re-encode failure on someone else's request is not our
		 * error; skip it and keep draining. */
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			/* Keep going until our own request has both been
			 * sent and received its reply. */
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			/* Our request still needs transmitting: surface the
			 * error to the caller via tk_status. */
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}
|
|
|
|
|
2021-05-26 06:43:38 +08:00
|
|
|
static void xprt_complete_request_init(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
if (task->tk_rqstp)
|
|
|
|
xprt_request_init(task);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Queue @task on the transport's backlog until a request slot frees up.
 * Marks the transport congested so new reservations throttle too. */
void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	/* xprt_complete_request_init() runs at wake-up time and claims
	 * any slot that was attached to the task while it slept. */
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);
|
2013-04-14 22:49:37 +08:00
|
|
|
|
2021-05-17 07:59:10 +08:00
|
|
|
static bool __xprt_set_rq(struct rpc_task *task, void *data)
|
2013-04-14 22:49:37 +08:00
|
|
|
{
|
2021-05-17 07:59:10 +08:00
|
|
|
struct rpc_rqst *req = data;
|
|
|
|
|
|
|
|
if (task->tk_rqstp == NULL) {
|
|
|
|
memset(req, 0, sizeof(*req)); /* mark unused */
|
|
|
|
task->tk_rqstp = req;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-05-26 06:43:38 +08:00
|
|
|
bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
|
2021-05-17 07:59:10 +08:00
|
|
|
{
|
|
|
|
if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
|
2013-04-14 22:49:37 +08:00
|
|
|
clear_bit(XPRT_CONGESTED, &xprt->state);
|
2021-05-17 07:59:10 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
2013-04-14 22:49:37 +08:00
|
|
|
}
|
2021-05-26 06:43:38 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
|
2013-04-14 22:49:37 +08:00
|
|
|
|
|
|
|
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
if (!test_bit(XPRT_CONGESTED, &xprt->state))
|
|
|
|
goto out;
|
|
|
|
spin_lock(&xprt->reserve_lock);
|
|
|
|
if (test_bit(XPRT_CONGESTED, &xprt->state)) {
|
2021-05-26 06:43:38 +08:00
|
|
|
xprt_add_backlog(xprt, task);
|
2013-04-14 22:49:37 +08:00
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
spin_unlock(&xprt->reserve_lock);
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-06-21 07:35:39 +08:00
|
|
|
/* Grow the slot table by one request, up to max_reqs.
 * Called with xprt->reserve_lock held; the lock is dropped around the
 * allocation and retaken afterwards. Returns the new slot, or
 * ERR_PTR(-EAGAIN) when at capacity / ERR_PTR(-ENOMEM) on OOM. */
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	/* Reserve the slot count before dropping the lock so concurrent
	 * callers cannot exceed max_reqs while we allocate. */
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	/* Allocation failed: give the reserved count back. */
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}
|
|
|
|
|
|
|
|
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
|
|
|
|
{
|
2018-03-06 04:13:13 +08:00
|
|
|
if (xprt->num_reqs > xprt->min_reqs) {
|
|
|
|
--xprt->num_reqs;
|
2011-07-18 06:11:30 +08:00
|
|
|
kfree(req);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-09-07 23:08:50 +08:00
|
|
|
/* Default ->alloc_slot implementation: attach a request slot to @task.
 * Prefers the free list, then tries to grow the table dynamically; on
 * -EAGAIN the task is placed on the backlog. Outcome is reported via
 * task->tk_status (0 on success, -ENOMEM or -EAGAIN on failure). */
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		/* Fast path: reuse a slot from the free list. */
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		/* Table is at capacity: wait on the backlog for a slot. */
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	/* Track the high-water mark of concurrently used slots. */
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
|
|
|
|
|
2018-05-05 03:34:59 +08:00
|
|
|
/* Default ->free_slot implementation: recycle @req.
 * First offer it to a backlogged task; failing that, try to shrink the
 * table; otherwise scrub it and return it to the free list. */
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);
|
2010-04-17 04:37:01 +08:00
|
|
|
|
2011-07-18 04:57:32 +08:00
|
|
|
static void xprt_free_all_slots(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
struct rpc_rqst *req;
|
|
|
|
while (!list_empty(&xprt->free)) {
|
|
|
|
req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
|
|
|
|
list_del(&req->rq_list);
|
|
|
|
kfree(req);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-09 03:59:13 +08:00
|
|
|
static DEFINE_IDA(rpc_xprt_ids);
|
|
|
|
|
|
|
|
/* Module-exit helper: release all resources held by the transport id
 * allocator. */
void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}
|
|
|
|
|
|
|
|
static int xprt_alloc_id(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
int id;
|
|
|
|
|
|
|
|
id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
|
|
|
|
if (id < 0)
|
|
|
|
return id;
|
|
|
|
|
|
|
|
xprt->id = id;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the transport's numeric id to the global IDA. */
static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_simple_remove(&rpc_xprt_ids, xprt->id);
}
|
|
|
|
|
2011-07-18 06:11:30 +08:00
|
|
|
/**
 * xprt_alloc - allocate an rpc_xprt with preallocated request slots
 * @net: network namespace the transport belongs to
 * @size: total size to allocate (>= sizeof(struct rpc_xprt))
 * @num_prealloc: number of request slots to preallocate
 * @max_alloc: upper bound on dynamically allocated slots
 *
 * Returns the new transport, or NULL on allocation failure.
 */
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	/* Fix: xprt_alloc_id() can fail; previously its return value was
	 * ignored, leaving xprt->id at 0 so that a later xprt_free_id()
	 * would remove an IDA entry this transport never owned. Bail out
	 * before xprt_init() takes a reference on @net. */
	if (xprt_alloc_id(xprt) < 0)
		goto out_free_xprt;
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	/* The dynamic ceiling can never be below the preallocated count. */
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	/* Full teardown: releases the id, net ref and any slots. */
	xprt_free(xprt);
	goto out;
out_free_xprt:
	kfree(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
|
|
|
|
|
2010-09-29 20:03:13 +08:00
|
|
|
/* Release everything xprt_alloc()/xprt_init() set up: the net ref, the
 * slot table, the numeric id and the sysfs entry, then free the struct
 * itself after an RCU grace period (readers may still hold it). */
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);
|
|
|
|
|
2018-09-02 05:21:01 +08:00
|
|
|
/* Initialize the request's connect cookie to "one behind" the current
 * transport cookie, so the request is treated as not yet sent on this
 * connection. */
static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}
|
|
|
|
|
2018-08-23 02:24:16 +08:00
|
|
|
/* Hand out the next XID from the transport's monotonically increasing
 * counter. reserve_lock serializes concurrent allocators. */
static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}
|
|
|
|
|
|
|
|
/* Seed the XID counter with a random starting value for this
 * transport instance. */
static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}
|
|
|
|
|
|
|
|
/* Prepare a freshly attached request slot for a new RPC call: wire it
 * to its task and transport, reset the send/receive buffers, assign a
 * new XID and initialize the timeout state. */
static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	/* Buffers are (re)filled during encode; start them empty. */
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
|
|
|
|
{
|
|
|
|
xprt->ops->alloc_slot(xprt, task);
|
|
|
|
if (task->tk_rqstp != NULL)
|
|
|
|
xprt_request_init(task);
|
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	/* Task already owns a slot (e.g. on retry): nothing to do. */
	if (task->tk_rqstp != NULL)
		return;

	/* Default to -EAGAIN; xprt_do_reserve()/alloc_slot overwrite it. */
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}
|
|
|
|
|
|
|
|
/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	/* Task already owns a slot: nothing to do. */
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		/* No slot was ever attached; the task may still hold the
		 * transport write lock, so release that. */
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	/* Remove from transmit/receive queues and wait out any pin
	 * before tearing the request down. */
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	/* Backchannel-preallocated slots have their own free path. */
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
|
|
|
|
|
2018-09-02 05:21:01 +08:00
|
|
|
#ifdef CONFIG_SUNRPC_BACKCHANNEL
|
|
|
|
void
|
|
|
|
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
|
|
|
|
{
|
|
|
|
struct xdr_buf *xbufp = &req->rq_snd_buf;
|
|
|
|
|
|
|
|
task->tk_rqstp = req;
|
|
|
|
req->rq_task = task;
|
|
|
|
xprt_init_connect_cookie(req, req->rq_xprt);
|
|
|
|
/*
|
|
|
|
* Set up the xdr_buf length.
|
|
|
|
* This also indicates that the buffer is XDR encoded already.
|
|
|
|
*/
|
|
|
|
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
|
|
|
|
xbufp->tail[0].iov_len;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-07-18 04:57:32 +08:00
|
|
|
/* Common initialization for every rpc_xprt: refcount, locks, queues,
 * wait queues, XID seed and the network namespace reference. Called
 * from xprt_alloc() after the struct has been zeroed. */
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	/* backlog is a priority queue so slot hand-off favours waiters
	 * according to scheduling priority. */
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}
|
|
|
|
|
|
|
|
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 * Looks up the transport class by @args->ident, builds the transport
 * via its setup routine, then finishes generic initialization (idle
 * timeout, cleanup work, disconnect timer, server name, debugfs).
 * Returns the new transport or an ERR_PTR on failure.
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	/* The class reference pins the module only for the duration of
	 * setup; drop it before inspecting the result. */
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}
|
|
|
|
|
2017-10-20 00:13:10 +08:00
|
|
|
/* Deferred destruction work item (scheduled by xprt_destroy()): tears
 * down debugfs, wait queues, the server name and the backchannel, then
 * hands off to the transport's own destroy routine. Runs on the system
 * workqueue so it can safely flush receive work running on rpciod. */
static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/* Ensure the autodisconnect timer cannot fire again. */
	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-02-25 09:31:39 +08:00
|
|
|
static void xprt_destroy_kref(struct kref *kref)
|
|
|
|
{
|
|
|
|
xprt_destroy(container_of(kref, struct rpc_xprt, kref));
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xprt_get - return a reference to an RPC transport.
|
|
|
|
* @xprt: pointer to the transport
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
|
|
|
|
return xprt;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xprt_get);
|
|
|
|
|
2006-09-06 00:55:57 +08:00
|
|
|
/**
|
|
|
|
* xprt_put - release a reference to an RPC transport.
|
|
|
|
* @xprt: pointer to the transport
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void xprt_put(struct rpc_xprt *xprt)
|
|
|
|
{
|
2015-02-25 09:31:39 +08:00
|
|
|
if (xprt != NULL)
|
|
|
|
kref_put(&xprt->kref, xprt_destroy_kref);
|
2006-09-06 00:55:57 +08:00
|
|
|
}
|
2016-01-08 03:50:10 +08:00
|
|
|
EXPORT_SYMBOL_GPL(xprt_put);
|