NFS-over-RDMA client updates for Linux 4.18
Stable patches:
- xprtrdma: Return -ENOBUFS when no pages are available

New features:
- Add ->alloc_slot() and ->free_slot() functions

Bugfixes and cleanups:
- Add missing SPDX tags to some files
- Try to fail mount quickly if client has no RDMA devices
- Create transport IDs in the correct network namespace
- Fix max_send_wr computation
- Clean up receive tracepoints
- Refactor receive handling
- Remove unused functions

Merge tag 'nfs-rdma-for-4.18-1' of git://git.linux-nfs.org/projects/anna/linux-nfs
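The new ->alloc_slot() and ->free_slot() methods move rpc_rqst slot management behind the transport's ops table: socket transports keep the generic slot-table helpers, while RPC-over-RDMA can hand out its own pre-allocated requests. A minimal user-space C sketch of the dispatch pattern follows; the struct and function names are illustrative, not kernel code.

/* Minimal user-space model of per-transport slot hooks.
 * Names (xprt_ops, generic_*) are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct rqst { int id; };
struct xprt;

struct xprt_ops {
	struct rqst *(*alloc_slot)(struct xprt *x);
	void (*free_slot)(struct xprt *x, struct rqst *r);
};

struct xprt {
	const struct xprt_ops *ops;
	int next_id;
};

/* Generic policy: allocate from the heap. */
static struct rqst *generic_alloc_slot(struct xprt *x)
{
	struct rqst *r = malloc(sizeof(*r));

	if (r)
		r->id = x->next_id++;
	return r;
}

static void generic_free_slot(struct xprt *x, struct rqst *r)
{
	(void)x;
	free(r);
}

static const struct xprt_ops generic_ops = {
	.alloc_slot = generic_alloc_slot,
	.free_slot  = generic_free_slot,
};

int main(void)
{
	struct xprt x = { .ops = &generic_ops, .next_id = 1 };
	struct rqst *r = x.ops->alloc_slot(&x);	/* dispatch via ops table */

	if (!r)
		return 1;
	printf("allocated slot %d\n", r->id);
	x.ops->free_slot(&x, r);
	return 0;
}

An RDMA-style transport can substitute a pool-backed alloc/free pair without touching any caller, which is the point of the indirection.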
This commit is contained in:
commit fcda3d5d22
include/linux/sunrpc/rpc_rdma.h

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (c) 2015-2017 Oracle.  All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
include/linux/sunrpc/xprt.h

@@ -84,7 +84,6 @@ struct rpc_rqst {
 	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
 	struct list_head	rq_list;
 
-	void			*rq_xprtdata;	/* Per-xprt private data */
 	void			*rq_buffer;	/* Call XDR encode buffer */
 	size_t			rq_callsize;
 	void			*rq_rbuffer;	/* Reply XDR decode buffer */

@@ -127,6 +126,8 @@ struct rpc_xprt_ops {
 	int		(*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
 	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
 	void		(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
+	void		(*free_slot)(struct rpc_xprt *xprt,
+				     struct rpc_rqst *req);
 	void		(*rpcbind)(struct rpc_task *task);
 	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
 	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);

@@ -324,10 +325,13 @@ struct xprt_class {
 struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
 void			xprt_connect(struct rpc_task *task);
 void			xprt_reserve(struct rpc_task *task);
+void			xprt_request_init(struct rpc_task *task);
 void			xprt_retry_reserve(struct rpc_task *task);
 int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void			xprt_free_slot(struct rpc_xprt *xprt,
+				       struct rpc_rqst *req);
 void			xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 bool			xprt_prepare_transmit(struct rpc_task *task);
 void			xprt_transmit(struct rpc_task *task);
include/linux/sunrpc/xprtrdma.h

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
  *
include/trace/events/rpcrdma.h

@@ -528,24 +528,54 @@ TRACE_EVENT(xprtrdma_post_send,
 
 TRACE_EVENT(xprtrdma_post_recv,
 	TP_PROTO(
-		const struct rpcrdma_rep *rep,
-		int status
+		const struct ib_cqe *cqe
 	),
 
-	TP_ARGS(rep, status),
+	TP_ARGS(cqe),
 
 	TP_STRUCT__entry(
-		__field(const void *, rep)
-		__field(int, status)
+		__field(const void *, cqe)
 	),
 
 	TP_fast_assign(
-		__entry->rep = rep;
-		__entry->status = status;
+		__entry->cqe = cqe;
 	),
 
-	TP_printk("rep=%p status=%d",
-		__entry->rep, __entry->status
+	TP_printk("cqe=%p",
+		__entry->cqe
 	)
 );
 
+TRACE_EVENT(xprtrdma_post_recvs,
+	TP_PROTO(
+		const struct rpcrdma_xprt *r_xprt,
+		unsigned int count,
+		int status
+	),
+
+	TP_ARGS(r_xprt, count, status),
+
+	TP_STRUCT__entry(
+		__field(const void *, r_xprt)
+		__field(unsigned int, count)
+		__field(int, status)
+		__field(int, posted)
+		__string(addr, rpcrdma_addrstr(r_xprt))
+		__string(port, rpcrdma_portstr(r_xprt))
+	),
+
+	TP_fast_assign(
+		__entry->r_xprt = r_xprt;
+		__entry->count = count;
+		__entry->status = status;
+		__entry->posted = r_xprt->rx_buf.rb_posted_receives;
+		__assign_str(addr, rpcrdma_addrstr(r_xprt));
+		__assign_str(port, rpcrdma_portstr(r_xprt));
+	),
+
+	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
+		__get_str(addr), __get_str(port), __entry->r_xprt,
+		__entry->count, __entry->posted, __entry->status
+	)
+);

@@ -584,28 +614,32 @@ TRACE_EVENT(xprtrdma_wc_send,
 
 TRACE_EVENT(xprtrdma_wc_receive,
 	TP_PROTO(
-		const struct rpcrdma_rep *rep,
 		const struct ib_wc *wc
 	),
 
-	TP_ARGS(rep, wc),
+	TP_ARGS(wc),
 
 	TP_STRUCT__entry(
-		__field(const void *, rep)
-		__field(unsigned int, byte_len)
+		__field(const void *, cqe)
+		__field(u32, byte_len)
 		__field(unsigned int, status)
-		__field(unsigned int, vendor_err)
+		__field(u32, vendor_err)
 	),
 
 	TP_fast_assign(
-		__entry->rep = rep;
-		__entry->byte_len = wc->byte_len;
+		__entry->cqe = wc->wr_cqe;
 		__entry->status = wc->status;
-		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+		if (wc->status) {
+			__entry->byte_len = 0;
+			__entry->vendor_err = wc->vendor_err;
+		} else {
+			__entry->byte_len = wc->byte_len;
+			__entry->vendor_err = 0;
+		}
 	),
 
-	TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
-		__entry->rep, __entry->byte_len,
+	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
+		__entry->cqe, __entry->byte_len,
 		rdma_show_wc_status(__entry->status),
 		__entry->status, __entry->vendor_err
 	)

@@ -616,6 +650,7 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
 
 DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_map);
 DEFINE_MR_EVENT(xprtrdma_dma_unmap);
 DEFINE_MR_EVENT(xprtrdma_remoteinv);
 DEFINE_MR_EVENT(xprtrdma_recover_mr);

@@ -799,7 +834,6 @@ TRACE_EVENT(xprtrdma_allocate,
 		__field(unsigned int, task_id)
 		__field(unsigned int, client_id)
 		__field(const void *, req)
-		__field(const void *, rep)
 		__field(size_t, callsize)
 		__field(size_t, rcvsize)
 	),

@@ -808,15 +842,13 @@ TRACE_EVENT(xprtrdma_allocate,
 		__entry->task_id = task->tk_pid;
 		__entry->client_id = task->tk_client->cl_clid;
 		__entry->req = req;
-		__entry->rep = req ? req->rl_reply : NULL;
 		__entry->callsize = task->tk_rqstp->rq_callsize;
 		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
 	),
 
-	TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
+	TP_printk("task:%u@%u req=%p (%zu, %zu)",
 		__entry->task_id, __entry->client_id,
-		__entry->req, __entry->rep,
-		__entry->callsize, __entry->rcvsize
+		__entry->req, __entry->callsize, __entry->rcvsize
 	)
 );

@@ -848,8 +880,6 @@ TRACE_EVENT(xprtrdma_rpc_done,
 	)
 );
 
-DEFINE_RXPRT_EVENT(xprtrdma_noreps);
-
 /**
  ** Callback events
  **/
net/sunrpc/clnt.c

@@ -1546,6 +1546,7 @@ call_reserveresult(struct rpc_task *task)
 	task->tk_status = 0;
 	if (status >= 0) {
 		if (task->tk_rqstp) {
+			xprt_request_init(task);
 			task->tk_action = call_refresh;
 			return;
 		}
net/sunrpc/xprt.c

@@ -66,7 +66,7 @@
  * Local functions
  */
 static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
-static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
+static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
 static void	xprt_connect_status(struct rpc_task *task);
 static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);

@@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
 		task->tk_status = -EAGAIN;
 		goto out_unlock;
 	}
+	if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
+		req->rq_xid = xprt_alloc_xid(xprt);
 	ret = true;
 out_unlock:
 	spin_unlock_bh(&xprt->transport_lock);

@@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 out_init_req:
 	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
+	spin_unlock(&xprt->reserve_lock);
+
 	task->tk_status = 0;
 	task->tk_rqstp = req;
-	xprt_request_init(task, xprt);
-	spin_unlock(&xprt->reserve_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_alloc_slot);
 

@@ -1184,7 +1186,7 @@ void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 }
 EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
 
-static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
 	spin_lock(&xprt->reserve_lock);
 	if (!xprt_dynamic_free_slot(xprt, req)) {

@@ -1194,6 +1196,7 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 	xprt_wake_up_backlog(xprt);
 	spin_unlock(&xprt->reserve_lock);
 }
+EXPORT_SYMBOL_GPL(xprt_free_slot);
 
 static void xprt_free_all_slots(struct rpc_xprt *xprt)
 {

@@ -1303,8 +1306,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
 	xprt->xid = prandom_u32();
 }
 
-static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+void xprt_request_init(struct rpc_task *task)
 {
+	struct rpc_xprt *xprt = task->tk_xprt;
 	struct rpc_rqst	*req = task->tk_rqstp;
 
 	INIT_LIST_HEAD(&req->rq_list);

@@ -1312,7 +1316,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 	req->rq_task	= task;
 	req->rq_xprt	= xprt;
 	req->rq_buffer	= NULL;
-	req->rq_xid	= xprt_alloc_xid(xprt);
 	req->rq_connect_cookie = xprt->connect_cookie - 1;
 	req->rq_bytes_sent = 0;
 	req->rq_snd_buf.len = 0;

@@ -1373,7 +1376,7 @@ void xprt_release(struct rpc_task *task)
 
 	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
 	if (likely(!bc_prealloc(req)))
-		xprt_free_slot(xprt, req);
+		xprt->ops->free_slot(xprt, req);
 	else
 		xprt_free_bc_request(req);
 }
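The xprt.c changes above defer XID assignment from slot initialization to xprt_prepare_transmit(), so an XID is consumed only once a request actually starts transmitting, and retransmits keep the same XID. A hedged user-space sketch of that lazy-assignment rule follows (names are illustrative; the real code also skips backchannel-preallocated requests via bc_prealloc()).

/* User-space sketch of lazy XID assignment: the ID is chosen at first
 * transmit, not at slot setup, and retransmits keep the same XID.
 * All names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct req {
	uint32_t xid;
	uint32_t bytes_sent;	/* nonzero once a transmit has started */
};

static uint32_t next_xid = 0x1000;

static void prepare_transmit(struct req *r)
{
	if (r->bytes_sent == 0)		/* like !req->rq_xmit_bytes_sent */
		r->xid = next_xid++;
}

int main(void)
{
	struct req r = { 0, 0 };

	prepare_transmit(&r);
	printf("first transmit: xid=%#x\n", (unsigned)r.xid);
	r.bytes_sent = 100;	/* partial send, then a retransmit */
	prepare_transmit(&r);	/* XID is preserved */
	printf("retransmit:     xid=%#x\n", (unsigned)r.xid);
	return 0;
}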
net/sunrpc/xprtrdma/backchannel.c

@@ -29,29 +29,41 @@ static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
 	spin_unlock(&buf->rb_reqslock);
 
 	rpcrdma_destroy_req(req);
-
-	kfree(rqst);
 }
 
-static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
-				 struct rpc_rqst *rqst)
+static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
+				 unsigned int count)
 {
-	struct rpcrdma_regbuf *rb;
-	struct rpcrdma_req *req;
-	size_t size;
+	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+	struct rpc_rqst *rqst;
+	unsigned int i;
 
-	req = rpcrdma_create_req(r_xprt);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	for (i = 0; i < (count << 1); i++) {
+		struct rpcrdma_regbuf *rb;
+		struct rpcrdma_req *req;
+		size_t size;
 
-	size = r_xprt->rx_data.inline_rsize;
-	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
-	if (IS_ERR(rb))
-		goto out_fail;
-	req->rl_sendbuf = rb;
-	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
-		     min_t(size_t, size, PAGE_SIZE));
-	rpcrdma_set_xprtdata(rqst, req);
+		req = rpcrdma_create_req(r_xprt);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		rqst = &req->rl_slot;
+
+		rqst->rq_xprt = xprt;
+		INIT_LIST_HEAD(&rqst->rq_list);
+		INIT_LIST_HEAD(&rqst->rq_bc_list);
+		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
+		spin_lock_bh(&xprt->bc_pa_lock);
+		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
+		spin_unlock_bh(&xprt->bc_pa_lock);
+
+		size = r_xprt->rx_data.inline_rsize;
+		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
+		if (IS_ERR(rb))
+			goto out_fail;
+		req->rl_sendbuf = rb;
+		xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
+			     min_t(size_t, size, PAGE_SIZE));
+	}
 	return 0;
 
 out_fail:

@@ -59,23 +71,6 @@ out_fail:
 	return -ENOMEM;
 }
 
-/* Allocate and add receive buffers to the rpcrdma_buffer's
- * existing list of rep's. These are released when the
- * transport is destroyed.
- */
-static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
-				 unsigned int count)
-{
-	int rc = 0;
-
-	while (count--) {
-		rc = rpcrdma_create_rep(r_xprt);
-		if (rc)
-			break;
-	}
-	return rc;
-}
-
 /**
  * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
  * @xprt: transport associated with these backchannel resources

@@ -86,9 +81,6 @@ static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
 int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 {
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
-	struct rpc_rqst *rqst;
-	unsigned int i;
 	int rc;
 
 	/* The backchannel reply path returns each rpc_rqst to the

@@ -103,35 +95,11 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
 		goto out_err;
 
-	for (i = 0; i < (reqs << 1); i++) {
-		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-		if (!rqst)
-			goto out_free;
-
-		dprintk("RPC: %s: new rqst %p\n", __func__, rqst);
-
-		rqst->rq_xprt = &r_xprt->rx_xprt;
-		INIT_LIST_HEAD(&rqst->rq_list);
-		INIT_LIST_HEAD(&rqst->rq_bc_list);
-		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
-
-		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
-			goto out_free;
-
-		spin_lock_bh(&xprt->bc_pa_lock);
-		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
-		spin_unlock_bh(&xprt->bc_pa_lock);
-	}
-
-	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
+	rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
 	if (rc)
 		goto out_free;
 
-	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
-	if (rc)
-		goto out_free;
-
-	buffer->rb_bc_srv_max_requests = reqs;
+	r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
 	request_module("svcrdma");
 	trace_xprtrdma_cb_setup(r_xprt, reqs);
 	return 0;

@@ -235,6 +203,7 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
 	if (rc < 0)
 		goto failed_marshal;
 
+	rpcrdma_post_recvs(r_xprt, true);
 	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
 		goto drop_connection;
 	return 0;

@@ -275,10 +244,14 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
  */
 void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
 {
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct rpc_xprt *xprt = rqst->rq_xprt;
 
 	dprintk("RPC: %s: freeing rqst %p (req %p)\n",
-		__func__, rqst, rpcr_to_rdmar(rqst));
+		__func__, rqst, req);
+
+	rpcrdma_recv_buffer_put(req->rl_reply);
+	req->rl_reply = NULL;
 
 	spin_lock_bh(&xprt->bc_pa_lock);
 	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
net/sunrpc/xprtrdma/fmr_ops.c

@@ -156,10 +156,32 @@ out_release:
 	fmr_op_release_mr(mr);
 }
 
+/* On success, sets:
+ *	ep->rep_attr.cap.max_send_wr
+ *	ep->rep_attr.cap.max_recv_wr
+ *	cdata->max_requests
+ *	ia->ri_max_segs
+ */
+static int
+fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+	    struct rpcrdma_create_data_internal *cdata)
+{
+	int max_qp_wr;
+
+	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
+	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
+	max_qp_wr -= 1;
+	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
+		return -ENOMEM;
+	if (cdata->max_requests > max_qp_wr)
+		cdata->max_requests = max_qp_wr;
+	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
+	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
+	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
+	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
+	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
+	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
+
+	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
+				RPCRDMA_MAX_FMR_SGES);
+	return 0;

@@ -219,6 +241,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 				     mr->mr_sg, i, mr->mr_dir);
 	if (!mr->mr_nents)
 		goto out_dmamap_err;
+	trace_xprtrdma_dma_map(mr);
 
 	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
 		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
net/sunrpc/xprtrdma/frwr_ops.c

@@ -202,12 +202,22 @@ out_release:
 	frwr_op_release_mr(mr);
 }
 
+/* On success, sets:
+ *	ep->rep_attr.cap.max_send_wr
+ *	ep->rep_attr.cap.max_recv_wr
+ *	cdata->max_requests
+ *	ia->ri_max_segs
+ *
+ * And these FRWR-related fields:
+ *	ia->ri_max_frwr_depth
+ *	ia->ri_mrtype
+ */
 static int
 frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	     struct rpcrdma_create_data_internal *cdata)
 {
 	struct ib_device_attr *attrs = &ia->ri_device->attrs;
-	int depth, delta;
+	int max_qp_wr, depth, delta;
 
 	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
 	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)

@@ -241,14 +251,26 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 		} while (delta > 0);
 	}
 
-	ep->rep_attr.cap.max_send_wr *= depth;
-	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
-		cdata->max_requests = attrs->max_qp_wr / depth;
+	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
+	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
+	max_qp_wr -= 1;
+	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
+		return -ENOMEM;
+	if (cdata->max_requests > max_qp_wr)
+		cdata->max_requests = max_qp_wr;
+	ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
+	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
+		cdata->max_requests = max_qp_wr / depth;
 		if (!cdata->max_requests)
 			return -EINVAL;
 		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
 					       depth;
 	}
+	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
+	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
+	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
+	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
+	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
 
 	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
 				ia->ri_max_frwr_depth);

@@ -393,6 +415,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
 	if (!mr->mr_nents)
 		goto out_dmamap_err;
+	trace_xprtrdma_dma_map(mr);
 
 	ibmr = frwr->fr_mr;
 	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
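Both fmr_op_open() and frwr_op_open() above apply the same budget rule: subtract the backchannel reserve and one drain WR from the device's max_qp_wr before clamping max_requests, then add them back when sizing the send and receive queues (FRWR additionally multiplies by the per-request WR depth). A small runnable sketch of that arithmetic, with made-up stand-in constants:

/* Sketch of the send/recv WR budget arithmetic shown above.
 * BACKWARD_WRS and MIN_SLOT_TABLE are stand-ins for the RPCRDMA_*
 * constants; values are illustrative.
 */
#include <stdio.h>

#define BACKWARD_WRS	8
#define MIN_SLOT_TABLE	2

static int compute_wrs(int device_max_qp_wr, int max_requests, int depth,
		       int *send_wr, int *recv_wr)
{
	int max_qp_wr = device_max_qp_wr - BACKWARD_WRS - 1;

	if (max_qp_wr < MIN_SLOT_TABLE)
		return -1;			/* device too small */
	if (max_requests > max_qp_wr)
		max_requests = max_qp_wr;

	*send_wr = max_requests * depth;	/* each RPC may need several WRs */
	if (*send_wr > max_qp_wr) {
		max_requests = max_qp_wr / depth;
		if (!max_requests)
			return -1;
		*send_wr = max_requests * depth;
	}
	*send_wr += BACKWARD_WRS + 1;		/* +1 for draining the SQ */
	*recv_wr = max_requests + BACKWARD_WRS + 1;
	return max_requests;
}

int main(void)
{
	int send_wr, recv_wr;
	int reqs = compute_wrs(1024, 128, 3, &send_wr, &recv_wr);

	/* 1024 - 9 = 1015 usable; 128 * 3 = 384 fits, so no clamp. */
	printf("max_requests=%d send_wr=%d recv_wr=%d\n",
	       reqs, send_wr, recv_wr);
	return 0;
}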
net/sunrpc/xprtrdma/module.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (c) 2015, 2017 Oracle. All rights reserved.
  */
net/sunrpc/xprtrdma/rpc_rdma.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (c) 2014-2017 Oracle.  All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.

@@ -54,14 +55,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-static const char transfertypes[][12] = {
-	"inline",	/* no chunks */
-	"read list",	/* some argument via rdma read */
-	"*read list",	/* entire request via rdma read */
-	"write list",	/* some result via rdma write */
-	"reply chunk"	/* entire reply via rdma write */
-};
-
 /* Returns size of largest RPC-over-RDMA header in a Call message
  *
  * The largest Call header contains a full-size Read list and a

@@ -230,7 +223,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
 		 */
 		*ppages = alloc_page(GFP_ATOMIC);
 		if (!*ppages)
-			return -EAGAIN;
+			return -ENOBUFS;
 	}
 	seg->mr_page = *ppages;
 	seg->mr_offset = (char *)page_base;

@@ -365,7 +358,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
 						   false, &mr);
 		if (IS_ERR(seg))
-			goto out_maperr;
+			return PTR_ERR(seg);
 		rpcrdma_mr_push(mr, &req->rl_registered);
 
 		if (encode_read_segment(xdr, mr, pos) < 0)

@@ -377,11 +370,6 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	} while (nsegs);
 
 	return 0;
-
-out_maperr:
-	if (PTR_ERR(seg) == -EAGAIN)
-		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
-	return PTR_ERR(seg);
 }
 
 /* Register and XDR encode the Write list. Supports encoding a list

@@ -428,7 +416,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
 						   true, &mr);
 		if (IS_ERR(seg))
-			goto out_maperr;
+			return PTR_ERR(seg);
 		rpcrdma_mr_push(mr, &req->rl_registered);
 
 		if (encode_rdma_segment(xdr, mr) < 0)

@@ -445,11 +433,6 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	*segcount = cpu_to_be32(nchunks);
 
 	return 0;
-
-out_maperr:
-	if (PTR_ERR(seg) == -EAGAIN)
-		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
-	return PTR_ERR(seg);
 }
 
 /* Register and XDR encode the Reply chunk. Supports encoding an array

@@ -491,7 +474,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
 						   true, &mr);
 		if (IS_ERR(seg))
-			goto out_maperr;
+			return PTR_ERR(seg);
 		rpcrdma_mr_push(mr, &req->rl_registered);
 
 		if (encode_rdma_segment(xdr, mr) < 0)

@@ -508,11 +491,6 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	*segcount = cpu_to_be32(nchunks);
 
 	return 0;
-
-out_maperr:
-	if (PTR_ERR(seg) == -EAGAIN)
-		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
-	return PTR_ERR(seg);
 }
 
 /**

@@ -709,7 +687,7 @@ rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
 {
 	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
 	if (!req->rl_sendctx)
-		return -ENOBUFS;
+		return -EAGAIN;
 	req->rl_sendctx->sc_wr.num_sge = 0;
 	req->rl_sendctx->sc_unmap_count = 0;
 	req->rl_sendctx->sc_req = req;

@@ -883,7 +861,15 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 	return 0;
 
 out_err:
-	r_xprt->rx_stats.failed_marshal_count++;
+	switch (ret) {
+	case -EAGAIN:
+		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
+		break;
+	case -ENOBUFS:
+		break;
+	default:
+		r_xprt->rx_stats.failed_marshal_count++;
+	}
 	return ret;
 }
 

@@ -1026,8 +1012,6 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
 
 out_short:
 	pr_warn("RPC/RDMA short backward direction call\n");
-	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
-		xprt_disconnect_done(&r_xprt->rx_xprt);
 	return true;
 }
 #else /* CONFIG_SUNRPC_BACKCHANNEL */

@@ -1333,13 +1317,14 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	u32 credits;
 	__be32 *p;
 
+	--buf->rb_posted_receives;
+
 	if (rep->rr_hdrbuf.head[0].iov_len == 0)
 		goto out_badstatus;
 
+	/* Fixed transport header fields */
 	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
 			rep->rr_hdrbuf.head[0].iov_base);
-
-	/* Fixed transport header fields */
 	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
 	if (unlikely(!p))
 		goto out_shortreply;

@@ -1378,17 +1363,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 
 	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
 
+	rpcrdma_post_recvs(r_xprt, false);
 	queue_work(rpcrdma_receive_wq, &rep->rr_work);
 	return;
 
-out_badstatus:
-	rpcrdma_recv_buffer_put(rep);
-	if (r_xprt->rx_ep.rep_connected == 1) {
-		r_xprt->rx_ep.rep_connected = -EIO;
-		rpcrdma_conn_func(&r_xprt->rx_ep);
-	}
-	return;
-
 out_badversion:
 	trace_xprtrdma_reply_vers(rep);
 	goto repost;

@@ -1408,7 +1386,7 @@ out_shortreply:
  * receive buffer before returning.
  */
 repost:
 	r_xprt->rx_stats.bad_reply_count++;
-	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
-		rpcrdma_recv_buffer_put(rep);
+	rpcrdma_post_recvs(r_xprt, false);
+out_badstatus:
+	rpcrdma_recv_buffer_put(rep);
 }
net/sunrpc/xprtrdma/svc_rdma_backchannel.c

@@ -273,6 +273,7 @@ static const struct rpc_xprt_ops xprt_rdma_bc_procs = {
 	.reserve_xprt		= xprt_reserve_xprt_cong,
 	.release_xprt		= xprt_release_xprt_cong,
 	.alloc_slot		= xprt_alloc_slot,
+	.free_slot		= xprt_free_slot,
 	.release_request	= xprt_release_rqst_cong,
 	.buf_alloc		= xprt_rdma_bc_allocate,
 	.buf_free		= xprt_rdma_bc_free,
net/sunrpc/xprtrdma/transport.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (c) 2014-2017 Oracle.  All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.

@@ -330,9 +331,7 @@ xprt_setup_rdma(struct xprt_create *args)
 		return ERR_PTR(-EBADF);
 	}
 
-	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
-			xprt_rdma_slot_table_entries,
-			xprt_rdma_slot_table_entries);
+	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 0);
 	if (xprt == NULL) {
 		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
 			__func__);

@@ -364,7 +363,7 @@ xprt_setup_rdma(struct xprt_create *args)
 	xprt_set_bound(xprt);
 	xprt_rdma_format_addresses(xprt, sap);
 
-	cdata.max_requests = xprt->max_reqs;
+	cdata.max_requests = xprt_rdma_slot_table_entries;
 
 	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
 	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

@@ -537,6 +536,47 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 	}
 }
 
+/**
+ * xprt_rdma_alloc_slot - allocate an rpc_rqst
+ * @xprt: controlling RPC transport
+ * @task: RPC task requesting a fresh rpc_rqst
+ *
+ * tk_status values:
+ *	%0 if task->tk_rqstp points to a fresh rpc_rqst
+ *	%-EAGAIN if no rpc_rqst is available; queued on backlog
+ */
+static void
+xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+	struct rpcrdma_req *req;
+
+	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
+	if (!req)
+		goto out_sleep;
+	task->tk_rqstp = &req->rl_slot;
+	task->tk_status = 0;
+	return;
+
+out_sleep:
+	rpc_sleep_on(&xprt->backlog, task, NULL);
+	task->tk_status = -EAGAIN;
+}
+
+/**
+ * xprt_rdma_free_slot - release an rpc_rqst
+ * @xprt: controlling RPC transport
+ * @rqst: rpc_rqst to release
+ *
+ */
+static void
+xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
+{
+	memset(rqst, 0, sizeof(*rqst));
+	rpcrdma_buffer_put(rpcr_to_rdmar(rqst));
+	rpc_wake_up_next(&xprt->backlog);
+}
+
 static bool
 rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		    size_t size, gfp_t flags)

@@ -607,13 +647,9 @@ xprt_rdma_allocate(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
-	struct rpcrdma_req *req;
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	gfp_t flags;
 
-	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
-	if (req == NULL)
-		goto out_get;
-
 	flags = RPCRDMA_DEF_GFP;
 	if (RPC_IS_SWAPPER(task))
 		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

@@ -623,15 +659,12 @@ xprt_rdma_allocate(struct rpc_task *task)
 	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
 		goto out_fail;
 
-	rpcrdma_set_xprtdata(rqst, req);
 	rqst->rq_buffer = req->rl_sendbuf->rg_base;
 	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
 	trace_xprtrdma_allocate(task, req);
 	return 0;
 
 out_fail:
-	rpcrdma_buffer_put(req);
-out_get:
 	trace_xprtrdma_allocate(task, NULL);
 	return -ENOMEM;
 }

@@ -652,7 +685,6 @@ xprt_rdma_free(struct rpc_task *task)
 	if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
 		rpcrdma_release_rqst(r_xprt, req);
 	trace_xprtrdma_rpc_done(task, req);
-	rpcrdma_buffer_put(req);
 }
 
 /**

@@ -690,9 +722,6 @@ xprt_rdma_send_request(struct rpc_task *task)
 	if (rc < 0)
 		goto failed_marshal;
 
-	if (req->rl_reply == NULL) /* e.g. reconnection */
-		rpcrdma_recv_buffer_get(req);
-
 	/* Must suppress retransmit to maintain credits */
 	if (rqst->rq_connect_cookie == xprt->connect_cookie)
 		goto drop_connection;

@@ -779,7 +808,8 @@ xprt_rdma_disable_swap(struct rpc_xprt *xprt)
 static const struct rpc_xprt_ops xprt_rdma_procs = {
 	.reserve_xprt		= xprt_reserve_xprt_cong,
 	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
-	.alloc_slot		= xprt_alloc_slot,
+	.alloc_slot		= xprt_rdma_alloc_slot,
+	.free_slot		= xprt_rdma_free_slot,
 	.release_request	= xprt_release_rqst_cong,       /* ditto */
 	.set_retrans_timeout	= xprt_set_retrans_timeout_def, /* ditto */
 	.timer			= xprt_rdma_timer,
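xprt_rdma_alloc_slot() and xprt_rdma_free_slot() above implement a bounded pool with a backlog: a task that finds the pool empty parks on xprt->backlog with -EAGAIN, and each returned slot wakes exactly one waiter. A single-threaded C model of that discipline (illustrative names, no locking):

/* Single-threaded model of the alloc_slot/free_slot backlog above:
 * a fixed pool; callers that find it empty are queued and woken one
 * at a time as slots come back. Illustrative only.
 */
#include <stdio.h>

#define POOL_SIZE 2

static int free_slots = POOL_SIZE;
static int backlog;	/* tasks parked waiting for a slot */

static int alloc_slot(int task)
{
	if (free_slots == 0) {
		backlog++;	/* rpc_sleep_on(&xprt->backlog, ...) */
		printf("task %d: no slot, queued on backlog (-EAGAIN)\n", task);
		return -1;
	}
	free_slots--;
	printf("task %d: got a slot\n", task);
	return 0;
}

static void free_slot(int task)
{
	free_slots++;
	printf("task %d: slot returned\n", task);
	if (backlog) {
		backlog--;	/* rpc_wake_up_next(&xprt->backlog) */
		printf("waking one backlogged task\n");
	}
}

int main(void)
{
	alloc_slot(1);
	alloc_slot(2);
	alloc_slot(3);	/* pool exhausted: queued */
	free_slot(1);	/* wakes task 3 */
	return 0;
}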
net/sunrpc/xprtrdma/verbs.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (c) 2014-2017 Oracle.  All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.

@@ -71,8 +72,10 @@
 /*
  * internal functions
  */
+static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
+static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
 static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);
 
 struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

@@ -159,7 +162,7 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 					       rr_cqe);
 
 	/* WARNING: Only wr_id and status are reliable at this point */
-	trace_xprtrdma_wc_receive(rep, wc);
+	trace_xprtrdma_wc_receive(wc);
 	if (wc->status != IB_WC_SUCCESS)
 		goto out_fail;
 

@@ -231,7 +234,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 		complete(&ia->ri_done);
 		break;
 	case RDMA_CM_EVENT_ADDR_ERROR:
-		ia->ri_async_rc = -EHOSTUNREACH;
+		ia->ri_async_rc = -EPROTO;
 		complete(&ia->ri_done);
 		break;
 	case RDMA_CM_EVENT_ROUTE_ERROR:

@@ -262,7 +265,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 		connstate = -ENOTCONN;
 		goto connected;
 	case RDMA_CM_EVENT_UNREACHABLE:
-		connstate = -ENETDOWN;
+		connstate = -ENETUNREACH;
 		goto connected;
 	case RDMA_CM_EVENT_REJECTED:
 		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",

@@ -305,8 +308,8 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
 	init_completion(&ia->ri_done);
 	init_completion(&ia->ri_remove_done);
 
-	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
-			    IB_QPT_RC);
+	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_conn_upcall,
+			    xprt, RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(id)) {
 		rc = PTR_ERR(id);
 		dprintk("RPC: %s: rdma_create_id() failed %i\n",

@@ -500,8 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 		  struct rpcrdma_create_data_internal *cdata)
 {
 	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
-	unsigned int max_qp_wr, max_sge;
 	struct ib_cq *sendcq, *recvcq;
+	unsigned int max_sge;
 	int rc;
 
 	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,

@@ -512,29 +515,13 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	}
 	ia->ri_max_send_sges = max_sge;
 
-	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
-		dprintk("RPC: %s: insufficient wqe's available\n",
-			__func__);
-		return -ENOMEM;
-	}
-	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;
-
-	/* check provider's send/recv wr limits */
-	if (cdata->max_requests > max_qp_wr)
-		cdata->max_requests = max_qp_wr;
+	rc = ia->ri_ops->ro_open(ia, ep, cdata);
+	if (rc)
+		return rc;
 
 	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
 	ep->rep_attr.qp_context = ep;
 	ep->rep_attr.srq = NULL;
-	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
-	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
-	ep->rep_attr.cap.max_send_wr += 1; /* drain cqe */
-	rc = ia->ri_ops->ro_open(ia, ep, cdata);
-	if (rc)
-		return rc;
-	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
-	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
-	ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */
 	ep->rep_attr.cap.max_send_sge = max_sge;
 	ep->rep_attr.cap.max_recv_sge = 1;
 	ep->rep_attr.cap.max_inline_data = 0;

@@ -741,7 +728,6 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 {
 	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
 						   rx_ia);
-	unsigned int extras;
 	int rc;
 
 retry:

@@ -785,9 +771,8 @@ retry:
 	}
 
 	dprintk("RPC: %s: connected\n", __func__);
-	extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
-	if (extras)
-		rpcrdma_ep_post_extra_recv(r_xprt, extras);
+
+	rpcrdma_post_recvs(r_xprt, true);
 
 out:
 	if (rc)

@@ -893,6 +878,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
 		sc->sc_xprt = r_xprt;
 		buf->rb_sc_ctxs[i] = sc;
 	}
+	buf->rb_flags = 0;
 
 	return 0;
 

@@ -950,7 +936,7 @@ out_emptyq:
 	 * completions recently. This is a sign the Send Queue is
 	 * backing up. Cause the caller to pause and try again.
 	 */
-	dprintk("RPC: %s: empty sendctx queue\n", __func__);
+	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
 	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
 	r_xprt->rx_stats.empty_sendctx_q++;
 	return NULL;

@@ -965,7 +951,8 @@ out_emptyq:
  *
  * The caller serializes calls to this function (per rpcrdma_buffer).
  */
-void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
+static void
+rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
 {
 	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
 	unsigned long next_tail;

@@ -984,6 +971,11 @@ void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
 
 	/* Paired with READ_ONCE */
 	smp_store_release(&buf->rb_sc_tail, next_tail);
+
+	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
+		smp_mb__after_atomic();
+		xprt_write_space(&sc->sc_xprt->rx_xprt);
+	}
 }
 
 static void

@@ -1097,14 +1089,8 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
 	return req;
 }
 
-/**
- * rpcrdma_create_rep - Allocate an rpcrdma_rep object
- * @r_xprt: controlling transport
- *
- * Returns 0 on success or a negative errno on failure.
- */
-int
-rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
+static int
+rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
 {
 	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

@@ -1132,6 +1118,7 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
 	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
 	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
 	rep->rr_recv_wr.num_sge = 1;
+	rep->rr_temp = temp;
 
 	spin_lock(&buf->rb_lock);
 	list_add(&rep->rr_list, &buf->rb_recv_bufs);

@@ -1183,12 +1170,8 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 		list_add(&req->rl_list, &buf->rb_send_bufs);
 	}
 
+	buf->rb_posted_receives = 0;
 	INIT_LIST_HEAD(&buf->rb_recv_bufs);
-	for (i = 0; i <= buf->rb_max_requests; i++) {
-		rc = rpcrdma_create_rep(r_xprt);
-		if (rc)
-			goto out;
-	}
 
 	rc = rpcrdma_sendctxs_create(r_xprt);
 	if (rc)

@@ -1200,28 +1183,6 @@ out:
 	return rc;
 }
 
-static struct rpcrdma_req *
-rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
-{
-	struct rpcrdma_req *req;
-
-	req = list_first_entry(&buf->rb_send_bufs,
-			       struct rpcrdma_req, rl_list);
-	list_del_init(&req->rl_list);
-	return req;
-}
-
-static struct rpcrdma_rep *
-rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
-{
-	struct rpcrdma_rep *rep;
-
-	rep = list_first_entry(&buf->rb_recv_bufs,
-			       struct rpcrdma_rep, rr_list);
-	list_del(&rep->rr_list);
-	return rep;
-}
-
 static void
 rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
 {

@@ -1280,10 +1241,11 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 	while (!list_empty(&buf->rb_recv_bufs)) {
 		struct rpcrdma_rep *rep;
 
-		rep = rpcrdma_buffer_get_rep_locked(buf);
+		rep = list_first_entry(&buf->rb_recv_bufs,
+				       struct rpcrdma_rep, rr_list);
+		list_del(&rep->rr_list);
 		rpcrdma_destroy_rep(rep);
 	}
-	buf->rb_send_count = 0;
 
 	spin_lock(&buf->rb_reqslock);
 	while (!list_empty(&buf->rb_allreqs)) {

@@ -1298,7 +1260,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 		spin_lock(&buf->rb_reqslock);
 	}
 	spin_unlock(&buf->rb_reqslock);
-	buf->rb_recv_count = 0;
 
 	rpcrdma_mrs_destroy(buf);
 }

@@ -1371,27 +1332,11 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
 	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
 }
 
-static struct rpcrdma_rep *
-rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
-{
-	/* If an RPC previously completed without a reply (say, a
-	 * credential problem or a soft timeout occurs) then hold off
-	 * on supplying more Receive buffers until the number of new
-	 * pending RPCs catches up to the number of posted Receives.
-	 */
-	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
-		return NULL;
-
-	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
-		return NULL;
-	buffers->rb_recv_count++;
-	return rpcrdma_buffer_get_rep_locked(buffers);
-}
-
-/*
- * Get a set of request/reply buffers.
+/**
+ * rpcrdma_buffer_get - Get a request buffer
+ * @buffers: Buffer pool from which to obtain a buffer
  *
- * Reply buffer (if available) is attached to send buffer upon return.
+ * Returns a fresh rpcrdma_req, or NULL if none are available.
  */
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)

@@ -1399,23 +1344,18 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 	struct rpcrdma_req *req;
 
 	spin_lock(&buffers->rb_lock);
-	if (list_empty(&buffers->rb_send_bufs))
-		goto out_reqbuf;
-	buffers->rb_send_count++;
-	req = rpcrdma_buffer_get_req_locked(buffers);
-	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
+	req = list_first_entry_or_null(&buffers->rb_send_bufs,
+				       struct rpcrdma_req, rl_list);
+	if (req)
+		list_del_init(&req->rl_list);
 	spin_unlock(&buffers->rb_lock);
-
 	return req;
-
-out_reqbuf:
-	spin_unlock(&buffers->rb_lock);
-	return NULL;
 }
 
-/*
- * Put request/reply buffers back into pool.
- * Pre-decrement counter/array index.
+/**
+ * rpcrdma_buffer_put - Put request/reply buffers back into pool
+ * @req: object to return
+ *
 */
 void
 rpcrdma_buffer_put(struct rpcrdma_req *req)

@@ -1426,27 +1366,16 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 	req->rl_reply = NULL;
 
 	spin_lock(&buffers->rb_lock);
-	buffers->rb_send_count--;
-	list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
+	list_add(&req->rl_list, &buffers->rb_send_bufs);
 	if (rep) {
-		buffers->rb_recv_count--;
-		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
+		if (!rep->rr_temp) {
+			list_add(&rep->rr_list, &buffers->rb_recv_bufs);
+			rep = NULL;
+		}
 	}
 	spin_unlock(&buffers->rb_lock);
-}
-
-/*
- * Recover reply buffers from pool.
- * This happens when recovering from disconnect.
- */
-void
-rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
-{
-	struct rpcrdma_buffer *buffers = req->rl_buffer;
-
-	spin_lock(&buffers->rb_lock);
-	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
-	spin_unlock(&buffers->rb_lock);
+	if (rep)
+		rpcrdma_destroy_rep(rep);
 }
 
 /*

@@ -1458,10 +1387,13 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
 {
 	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
 
-	spin_lock(&buffers->rb_lock);
-	buffers->rb_recv_count--;
-	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-	spin_unlock(&buffers->rb_lock);
+	if (!rep->rr_temp) {
+		spin_lock(&buffers->rb_lock);
+		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
+		spin_unlock(&buffers->rb_lock);
+	} else {
+		rpcrdma_destroy_rep(rep);
+	}
 }
 
 /**

@@ -1557,13 +1489,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
 	int rc;
 
-	if (req->rl_reply) {
-		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
-		if (rc)
-			return rc;
-		req->rl_reply = NULL;
-	}
-
 	if (!ep->rep_send_count ||
 	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
 		send_wr->send_flags |= IB_SEND_SIGNALED;

@@ -1580,61 +1505,69 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 	return 0;
 }
 
-int
-rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
-		     struct rpcrdma_rep *rep)
-{
-	struct ib_recv_wr *recv_wr_fail;
-	int rc;
-
-	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
-		goto out_map;
-	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
-	trace_xprtrdma_post_recv(rep, rc);
-	if (rc)
-		return -ENOTCONN;
-	return 0;
-
-out_map:
-	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
-	return -EIO;
-}
-
 /**
- * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
- * @r_xprt: transport associated with these backchannel resources
- * @count: minimum number of incoming requests expected
+ * rpcrdma_post_recvs - Maybe post some Receive buffers
+ * @r_xprt: controlling transport
+ * @temp: when true, allocate temp rpcrdma_rep objects
  *
- * Returns zero if all requested buffers were posted, or a negative errno.
 */
-int
-rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
+void
+rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 {
-	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_rep *rep;
-	int rc;
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+	struct ib_recv_wr *wr, *bad_wr;
+	int needed, count, rc;
 
-	while (count--) {
-		spin_lock(&buffers->rb_lock);
-		if (list_empty(&buffers->rb_recv_bufs))
-			goto out_reqbuf;
-		rep = rpcrdma_buffer_get_rep_locked(buffers);
-		spin_unlock(&buffers->rb_lock);
+	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
+	if (buf->rb_posted_receives > needed)
+		return;
+	needed -= buf->rb_posted_receives;
 
-		rc = rpcrdma_ep_post_recv(ia, rep);
-		if (rc)
-			goto out_rc;
-	}
+	count = 0;
+	wr = NULL;
+	while (needed) {
+		struct rpcrdma_regbuf *rb;
+		struct rpcrdma_rep *rep;
 
-	return 0;
+		spin_lock(&buf->rb_lock);
+		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
+					       struct rpcrdma_rep, rr_list);
+		if (likely(rep))
+			list_del(&rep->rr_list);
+		spin_unlock(&buf->rb_lock);
+		if (!rep) {
+			if (rpcrdma_create_rep(r_xprt, temp))
+				break;
+			continue;
+		}
 
-out_reqbuf:
-	spin_unlock(&buffers->rb_lock);
-	trace_xprtrdma_noreps(r_xprt);
-	return -ENOMEM;
+		rb = rep->rr_rdmabuf;
+		if (!rpcrdma_regbuf_is_mapped(rb)) {
+			if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) {
+				rpcrdma_recv_buffer_put(rep);
+				break;
+			}
+		}
 
-out_rc:
-	rpcrdma_recv_buffer_put(rep);
-	return rc;
+		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
+		rep->rr_recv_wr.next = wr;
+		wr = &rep->rr_recv_wr;
+		++count;
+		--needed;
+	}
+	if (!count)
+		return;
+
+	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, &bad_wr);
+	if (rc) {
+		for (wr = bad_wr; wr; wr = wr->next) {
+			struct rpcrdma_rep *rep;
+
+			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
+			rpcrdma_recv_buffer_put(rep);
+			--count;
+		}
+	}
+	buf->rb_posted_receives += count;
+	trace_xprtrdma_post_recvs(r_xprt, count, rc);
 }
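rpcrdma_post_recvs() above replaces eager per-reply Receive posting: it computes how many Receives should be outstanding from the current credit grant plus a doubled backchannel reserve, and posts only the shortfall as one chained ib_post_recv(). A sketch of just the "needed" computation (field names abbreviated; illustrative model):

/* Sketch of the lazy Receive top-up rule used above: keep enough
 * Receives posted to cover the credit grant plus a backchannel
 * reserve, and only post the difference. Illustrative only.
 */
#include <stdio.h>

struct buf {
	int credits;		/* most recent credit grant */
	int bc_max_requests;	/* backchannel reserve */
	int posted_receives;	/* Receives currently posted */
};

static int recvs_to_post(const struct buf *b)
{
	int needed = b->credits + (b->bc_max_requests << 1);

	if (b->posted_receives > needed)
		return 0;	/* already ahead: nothing to do */
	return needed - b->posted_receives;
}

int main(void)
{
	struct buf b = { .credits = 32, .bc_max_requests = 4,
			 .posted_receives = 20 };
	int n = recvs_to_post(&b);

	/* 32 + 8 = 40 wanted, 20 posted: top up 20 in one chain. */
	printf("post %d receives\n", n);
	b.posted_receives += n;
	printf("then %d more\n", recvs_to_post(&b));
	return 0;
}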
net/sunrpc/xprtrdma/xprt_rdma.h

@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (c) 2014-2017 Oracle.  All rights reserved.
  * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.

@@ -196,6 +197,7 @@ struct rpcrdma_rep {
 	__be32			rr_proc;
 	int			rr_wc_flags;
 	u32			rr_inv_rkey;
+	bool			rr_temp;
 	struct rpcrdma_regbuf	*rr_rdmabuf;
 	struct rpcrdma_xprt	*rr_rxprt;
 	struct work_struct	rr_work;

@@ -334,6 +336,7 @@ enum {
 struct rpcrdma_buffer;
 struct rpcrdma_req {
 	struct list_head	rl_list;
+	struct rpc_rqst		rl_slot;
 	struct rpcrdma_buffer	*rl_buffer;
 	struct rpcrdma_rep	*rl_reply;
 	struct xdr_stream	rl_stream;

@@ -356,16 +359,10 @@ enum {
 	RPCRDMA_REQ_F_TX_RESOURCES,
 };
 
-static inline void
-rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
-{
-	rqst->rq_xprtdata = req;
-}
-
 static inline struct rpcrdma_req *
 rpcr_to_rdmar(const struct rpc_rqst *rqst)
 {
-	return rqst->rq_xprtdata;
+	return container_of(rqst, struct rpcrdma_req, rl_slot);
 }
 
 static inline void

@@ -401,11 +398,12 @@ struct rpcrdma_buffer {
 	struct rpcrdma_sendctx	**rb_sc_ctxs;
 
 	spinlock_t		rb_lock;	/* protect buf lists */
-	int			rb_send_count, rb_recv_count;
 	struct list_head	rb_send_bufs;
 	struct list_head	rb_recv_bufs;
+	unsigned long		rb_flags;
 	u32			rb_max_requests;
 	u32			rb_credits;	/* most recent credit grant */
+	int			rb_posted_receives;
 
 	u32			rb_bc_srv_max_requests;
 	spinlock_t		rb_reqslock;	/* protect rb_allreqs */

@@ -420,6 +418,11 @@ struct rpcrdma_buffer {
 };
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
 
+/* rb_flags */
+enum {
+	RPCRDMA_BUF_F_EMPTY_SCQ = 0,
+};
+
 /*
  * Internal structure for transport instance creation. This
  * exists primarily for modularity.

@@ -561,18 +564,16 @@ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
 				struct rpcrdma_req *);
-int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);
+void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
 
 /*
  * Buffer calls - xprtrdma/verbs.c
 */
 struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
 void rpcrdma_destroy_req(struct rpcrdma_req *);
-int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
-void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
 
 struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_mr_put(struct rpcrdma_mr *mr);

@@ -581,7 +582,6 @@ void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);
 
 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_req *);
-void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
 void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
 
 struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,

@@ -603,8 +603,6 @@ rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 	return __rpcrdma_dma_map_regbuf(ia, rb);
 }
 
-int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
-
 int rpcrdma_alloc_wq(void);
 void rpcrdma_destroy_wq(void);
 
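Because struct rpc_rqst is now embedded in struct rpcrdma_req as rl_slot, rpcr_to_rdmar() above recovers the containing request with container_of() instead of chasing the removed rq_xprtdata pointer. A standalone sketch of the same idiom (simplified, illustrative types):

/* Sketch of the embed-and-container_of idiom used by rpcr_to_rdmar():
 * the inner struct's address is converted back to the outer struct by
 * subtracting the member offset. Types here are simplified stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rqst { int xid; };

struct rdma_req {
	int tag;
	struct rqst slot;	/* embedded, like rl_slot */
};

static struct rdma_req *to_rdma_req(struct rqst *r)
{
	return container_of(r, struct rdma_req, slot);
}

int main(void)
{
	struct rdma_req req = { .tag = 7, .slot = { .xid = 42 } };
	struct rqst *r = &req.slot;

	printf("tag=%d xid=%d\n", to_rdma_req(r)->tag, r->xid);
	return 0;
}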
net/sunrpc/xprtsock.c

@@ -2763,6 +2763,7 @@ static const struct rpc_xprt_ops xs_local_ops = {
 	.reserve_xprt		= xprt_reserve_xprt,
 	.release_xprt		= xs_tcp_release_xprt,
 	.alloc_slot		= xprt_alloc_slot,
+	.free_slot		= xprt_free_slot,
 	.rpcbind		= xs_local_rpcbind,
 	.set_port		= xs_local_set_port,
 	.connect		= xs_local_connect,

@@ -2782,6 +2783,7 @@ static const struct rpc_xprt_ops xs_udp_ops = {
 	.reserve_xprt		= xprt_reserve_xprt_cong,
 	.release_xprt		= xprt_release_xprt_cong,
 	.alloc_slot		= xprt_alloc_slot,
+	.free_slot		= xprt_free_slot,
 	.rpcbind		= rpcb_getport_async,
 	.set_port		= xs_set_port,
 	.connect		= xs_connect,

@@ -2803,6 +2805,7 @@ static const struct rpc_xprt_ops xs_tcp_ops = {
 	.reserve_xprt		= xprt_reserve_xprt,
 	.release_xprt		= xs_tcp_release_xprt,
 	.alloc_slot		= xprt_lock_and_alloc_slot,
+	.free_slot		= xprt_free_slot,
 	.rpcbind		= rpcb_getport_async,
 	.set_port		= xs_set_port,
 	.connect		= xs_connect,

@@ -2834,6 +2837,7 @@ static const struct rpc_xprt_ops bc_tcp_ops = {
 	.reserve_xprt		= xprt_reserve_xprt,
 	.release_xprt		= xprt_release_xprt,
 	.alloc_slot		= xprt_alloc_slot,
+	.free_slot		= xprt_free_slot,
 	.buf_alloc		= bc_malloc,
 	.buf_free		= bc_free,
 	.send_request		= bc_send_request,