Merge tag 'nfs-rdma-for-5.3-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

NFSoRDMA client updates for 5.3

New features:
- Add a way to place MRs back on the free list
- Reduce context switching
- Add new trace events

Bugfixes and cleanups:
- Fix a BUG when tracing is enabled with NFSv4.1
- Fix a use-after-free in rpcrdma_post_recvs
- Replace use of xdr_stream_pos in rpcrdma_marshal_req
- Fix occasional transport deadlock
- Fix show_nfs_errors macros, other tracing improvements
- Remove RPCRDMA_REQ_F_PENDING and fr_state
- Various simplifications and refactors
This commit is contained in:
Trond Myklebust 2019-07-11 16:52:47 -04:00
commit 347543e640
16 changed files with 844 additions and 510 deletions

View File

@ -414,27 +414,39 @@ static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
const struct cb_sequenceargs * args) const struct cb_sequenceargs * args)
{ {
__be32 ret;
ret = cpu_to_be32(NFS4ERR_BADSLOT);
if (args->csa_slotid > tbl->server_highest_slotid) if (args->csa_slotid > tbl->server_highest_slotid)
return htonl(NFS4ERR_BADSLOT); goto out_err;
/* Replay */ /* Replay */
if (args->csa_sequenceid == slot->seq_nr) { if (args->csa_sequenceid == slot->seq_nr) {
ret = cpu_to_be32(NFS4ERR_DELAY);
if (nfs4_test_locked_slot(tbl, slot->slot_nr)) if (nfs4_test_locked_slot(tbl, slot->slot_nr))
return htonl(NFS4ERR_DELAY); goto out_err;
/* Signal process_op to set this error on next op */ /* Signal process_op to set this error on next op */
ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
if (args->csa_cachethis == 0) if (args->csa_cachethis == 0)
return htonl(NFS4ERR_RETRY_UNCACHED_REP); goto out_err;
/* Liar! We never allowed you to set csa_cachethis != 0 */ /* Liar! We never allowed you to set csa_cachethis != 0 */
return htonl(NFS4ERR_SEQ_FALSE_RETRY); ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
goto out_err;
} }
/* Note: wraparound relies on seq_nr being of type u32 */ /* Note: wraparound relies on seq_nr being of type u32 */
if (likely(args->csa_sequenceid == slot->seq_nr + 1))
return htonl(NFS4_OK);
/* Misordered request */ /* Misordered request */
return htonl(NFS4ERR_SEQ_MISORDERED); ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
if (args->csa_sequenceid != slot->seq_nr + 1)
goto out_err;
return cpu_to_be32(NFS4_OK);
out_err:
trace_nfs4_cb_seqid_err(args, ret);
return ret;
} }
/* /*

View File

@ -151,7 +151,7 @@ static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
return 0; return 0;
out_status: out_status:
*status = be32_to_cpup(p); *status = be32_to_cpup(p);
trace_nfs_xdr_status((int)*status); trace_nfs_xdr_status(xdr, (int)*status);
return 0; return 0;
} }

View File

@ -343,7 +343,7 @@ static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
return 0; return 0;
out_status: out_status:
*status = be32_to_cpup(p); *status = be32_to_cpup(p);
trace_nfs_xdr_status((int)*status); trace_nfs_xdr_status(xdr, (int)*status);
return 0; return 0;
} }

View File

@ -156,7 +156,7 @@ TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
TRACE_DEFINE_ENUM(NFS4ERR_XDEV); TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
#define show_nfsv4_errors(error) \ #define show_nfsv4_errors(error) \
__print_symbolic(-(error), \ __print_symbolic(error, \
{ NFS4_OK, "OK" }, \ { NFS4_OK, "OK" }, \
/* Mapped by nfs4_stat_to_errno() */ \ /* Mapped by nfs4_stat_to_errno() */ \
{ EPERM, "EPERM" }, \ { EPERM, "EPERM" }, \
@ -348,7 +348,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
TP_STRUCT__entry( TP_STRUCT__entry(
__string(dstaddr, clp->cl_hostname) __string(dstaddr, clp->cl_hostname)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
@ -357,8 +357,8 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) dstaddr=%s", "error=%ld (%s) dstaddr=%s",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
__get_str(dstaddr) __get_str(dstaddr)
) )
@ -420,7 +420,7 @@ TRACE_EVENT(nfs4_sequence_done,
__field(unsigned int, highest_slotid) __field(unsigned int, highest_slotid)
__field(unsigned int, target_highest_slotid) __field(unsigned int, target_highest_slotid)
__field(unsigned int, status_flags) __field(unsigned int, status_flags)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
@ -435,10 +435,10 @@ TRACE_EVENT(nfs4_sequence_done,
__entry->error = res->sr_status; __entry->error = res->sr_status;
), ),
TP_printk( TP_printk(
"error=%d (%s) session=0x%08x slot_nr=%u seq_nr=%u " "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
"highest_slotid=%u target_highest_slotid=%u " "highest_slotid=%u target_highest_slotid=%u "
"status_flags=%u (%s)", "status_flags=%u (%s)",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
__entry->session, __entry->session,
__entry->slot_nr, __entry->slot_nr,
@ -467,7 +467,7 @@ TRACE_EVENT(nfs4_cb_sequence,
__field(unsigned int, seq_nr) __field(unsigned int, seq_nr)
__field(unsigned int, highest_slotid) __field(unsigned int, highest_slotid)
__field(unsigned int, cachethis) __field(unsigned int, cachethis)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
@ -476,13 +476,13 @@ TRACE_EVENT(nfs4_cb_sequence,
__entry->seq_nr = args->csa_sequenceid; __entry->seq_nr = args->csa_sequenceid;
__entry->highest_slotid = args->csa_highestslotid; __entry->highest_slotid = args->csa_highestslotid;
__entry->cachethis = args->csa_cachethis; __entry->cachethis = args->csa_cachethis;
__entry->error = -be32_to_cpu(status); __entry->error = be32_to_cpu(status);
), ),
TP_printk( TP_printk(
"error=%d (%s) session=0x%08x slot_nr=%u seq_nr=%u " "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
"highest_slotid=%u", "highest_slotid=%u",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
__entry->session, __entry->session,
__entry->slot_nr, __entry->slot_nr,
@ -490,6 +490,44 @@ TRACE_EVENT(nfs4_cb_sequence,
__entry->highest_slotid __entry->highest_slotid
) )
); );
TRACE_EVENT(nfs4_cb_seqid_err,
TP_PROTO(
const struct cb_sequenceargs *args,
__be32 status
),
TP_ARGS(args, status),
TP_STRUCT__entry(
__field(unsigned int, session)
__field(unsigned int, slot_nr)
__field(unsigned int, seq_nr)
__field(unsigned int, highest_slotid)
__field(unsigned int, cachethis)
__field(unsigned long, error)
),
TP_fast_assign(
__entry->session = nfs_session_id_hash(&args->csa_sessionid);
__entry->slot_nr = args->csa_slotid;
__entry->seq_nr = args->csa_sequenceid;
__entry->highest_slotid = args->csa_highestslotid;
__entry->cachethis = args->csa_cachethis;
__entry->error = be32_to_cpu(status);
),
TP_printk(
"error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
"highest_slotid=%u",
-__entry->error,
show_nfsv4_errors(__entry->error),
__entry->session,
__entry->slot_nr,
__entry->seq_nr,
__entry->highest_slotid
)
);
#endif /* CONFIG_NFS_V4_1 */ #endif /* CONFIG_NFS_V4_1 */
TRACE_EVENT(nfs4_setup_sequence, TRACE_EVENT(nfs4_setup_sequence,
@ -526,26 +564,37 @@ TRACE_EVENT(nfs4_setup_sequence,
TRACE_EVENT(nfs4_xdr_status, TRACE_EVENT(nfs4_xdr_status,
TP_PROTO( TP_PROTO(
const struct xdr_stream *xdr,
u32 op, u32 op,
int error int error
), ),
TP_ARGS(op, error), TP_ARGS(xdr, op, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(u32, op) __field(u32, op)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
const struct rpc_rqst *rqstp = xdr->rqst;
const struct rpc_task *task = rqstp->rq_task;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->op = op; __entry->op = op;
__entry->error = -error; __entry->error = error;
), ),
TP_printk( TP_printk(
"operation %d: nfs status %d (%s)", "task:%u@%d xid=0x%08x error=%ld (%s) operation=%u",
__entry->op, __entry->task_id, __entry->client_id, __entry->xid,
__entry->error, show_nfsv4_errors(__entry->error) -__entry->error, show_nfsv4_errors(__entry->error),
__entry->op
) )
); );
@ -559,7 +608,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
TP_ARGS(ctx, flags, error), TP_ARGS(ctx, flags, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(unsigned int, flags) __field(unsigned int, flags)
__field(unsigned int, fmode) __field(unsigned int, fmode)
__field(dev_t, dev) __field(dev_t, dev)
@ -577,7 +626,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
const struct nfs4_state *state = ctx->state; const struct nfs4_state *state = ctx->state;
const struct inode *inode = NULL; const struct inode *inode = NULL;
__entry->error = error; __entry->error = -error;
__entry->flags = flags; __entry->flags = flags;
__entry->fmode = (__force unsigned int)ctx->mode; __entry->fmode = (__force unsigned int)ctx->mode;
__entry->dev = ctx->dentry->d_sb->s_dev; __entry->dev = ctx->dentry->d_sb->s_dev;
@ -609,11 +658,11 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) flags=%d (%s) fmode=%s " "error=%ld (%s) flags=%d (%s) fmode=%s "
"fileid=%02x:%02x:%llu fhandle=0x%08x " "fileid=%02x:%02x:%llu fhandle=0x%08x "
"name=%02x:%02x:%llu/%s stateid=%d:0x%08x " "name=%02x:%02x:%llu/%s stateid=%d:0x%08x "
"openstateid=%d:0x%08x", "openstateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
__entry->flags, __entry->flags,
show_open_flags(__entry->flags), show_open_flags(__entry->flags),
@ -695,7 +744,7 @@ TRACE_EVENT(nfs4_close,
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
__field(unsigned int, fmode) __field(unsigned int, fmode)
__field(int, error) __field(unsigned long, error)
__field(int, stateid_seq) __field(int, stateid_seq)
__field(u32, stateid_hash) __field(u32, stateid_hash)
), ),
@ -715,9 +764,9 @@ TRACE_EVENT(nfs4_close,
), ),
TP_printk( TP_printk(
"error=%d (%s) fmode=%s fileid=%02x:%02x:%llu " "error=%ld (%s) fmode=%s fileid=%02x:%02x:%llu "
"fhandle=0x%08x openstateid=%d:0x%08x", "fhandle=0x%08x openstateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
__entry->fmode ? show_fmode_flags(__entry->fmode) : __entry->fmode ? show_fmode_flags(__entry->fmode) :
"closed", "closed",
@ -757,7 +806,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
TP_ARGS(request, state, cmd, error), TP_ARGS(request, state, cmd, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(int, cmd) __field(int, cmd)
__field(char, type) __field(char, type)
__field(loff_t, start) __field(loff_t, start)
@ -787,10 +836,10 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) cmd=%s:%s range=%lld:%lld " "error=%ld (%s) cmd=%s:%s range=%lld:%lld "
"fileid=%02x:%02x:%llu fhandle=0x%08x " "fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x", "stateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
show_lock_cmd(__entry->cmd), show_lock_cmd(__entry->cmd),
show_lock_type(__entry->type), show_lock_type(__entry->type),
@ -827,7 +876,7 @@ TRACE_EVENT(nfs4_set_lock,
TP_ARGS(request, state, lockstateid, cmd, error), TP_ARGS(request, state, lockstateid, cmd, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(int, cmd) __field(int, cmd)
__field(char, type) __field(char, type)
__field(loff_t, start) __field(loff_t, start)
@ -863,10 +912,10 @@ TRACE_EVENT(nfs4_set_lock,
), ),
TP_printk( TP_printk(
"error=%d (%s) cmd=%s:%s range=%lld:%lld " "error=%ld (%s) cmd=%s:%s range=%lld:%lld "
"fileid=%02x:%02x:%llu fhandle=0x%08x " "fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x lockstateid=%d:0x%08x", "stateid=%d:0x%08x lockstateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
show_lock_cmd(__entry->cmd), show_lock_cmd(__entry->cmd),
show_lock_type(__entry->type), show_lock_type(__entry->type),
@ -932,7 +981,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(int, error) __field(unsigned long, error)
__field(int, stateid_seq) __field(int, stateid_seq)
__field(u32, stateid_hash) __field(u32, stateid_hash)
), ),
@ -948,9 +997,9 @@ TRACE_EVENT(nfs4_delegreturn_exit,
), ),
TP_printk( TP_printk(
"error=%d (%s) dev=%02x:%02x fhandle=0x%08x " "error=%ld (%s) dev=%02x:%02x fhandle=0x%08x "
"stateid=%d:0x%08x", "stateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->fhandle, __entry->fhandle,
@ -969,7 +1018,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
TP_ARGS(state, lsp, error), TP_ARGS(state, lsp, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
@ -991,9 +1040,9 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x", "stateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1026,7 +1075,7 @@ DECLARE_EVENT_CLASS(nfs4_lookup_event,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(int, error) __field(unsigned long, error)
__field(u64, dir) __field(u64, dir)
__string(name, name->name) __string(name, name->name)
), ),
@ -1034,13 +1083,13 @@ DECLARE_EVENT_CLASS(nfs4_lookup_event,
TP_fast_assign( TP_fast_assign(
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->error = error; __entry->error = -error;
__assign_str(name, name->name); __assign_str(name, name->name);
), ),
TP_printk( TP_printk(
"error=%d (%s) name=%02x:%02x:%llu/%s", "error=%ld (%s) name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir, (unsigned long long)__entry->dir,
@ -1076,7 +1125,7 @@ TRACE_EVENT(nfs4_lookupp,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, ino) __field(u64, ino)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
@ -1086,8 +1135,8 @@ TRACE_EVENT(nfs4_lookupp,
), ),
TP_printk( TP_printk(
"error=%d (%s) inode=%02x:%02x:%llu", "error=%ld (%s) inode=%02x:%02x:%llu",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->ino (unsigned long long)__entry->ino
@ -1107,7 +1156,7 @@ TRACE_EVENT(nfs4_rename,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(int, error) __field(unsigned long, error)
__field(u64, olddir) __field(u64, olddir)
__string(oldname, oldname->name) __string(oldname, oldname->name)
__field(u64, newdir) __field(u64, newdir)
@ -1124,9 +1173,9 @@ TRACE_EVENT(nfs4_rename,
), ),
TP_printk( TP_printk(
"error=%d (%s) oldname=%02x:%02x:%llu/%s " "error=%ld (%s) oldname=%02x:%02x:%llu/%s "
"newname=%02x:%02x:%llu/%s", "newname=%02x:%02x:%llu/%s",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->olddir, (unsigned long long)__entry->olddir,
@ -1149,19 +1198,19 @@ DECLARE_EVENT_CLASS(nfs4_inode_event,
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode); __entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->error = error; __entry->error = error < 0 ? -error : 0;
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x", "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1200,7 +1249,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
__field(int, error) __field(unsigned long, error)
__field(int, stateid_seq) __field(int, stateid_seq)
__field(u32, stateid_hash) __field(u32, stateid_hash)
), ),
@ -1217,9 +1266,9 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x", "stateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1257,7 +1306,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
__field(unsigned int, valid) __field(unsigned int, valid)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
@ -1269,9 +1318,9 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"valid=%s", "valid=%s",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1304,7 +1353,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
TP_ARGS(clp, fhandle, inode, error), TP_ARGS(clp, fhandle, inode, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
@ -1325,9 +1374,9 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"dstaddr=%s", "dstaddr=%s",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1359,7 +1408,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
TP_ARGS(clp, fhandle, inode, stateid, error), TP_ARGS(clp, fhandle, inode, stateid, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(u64, fileid) __field(u64, fileid)
@ -1386,9 +1435,9 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"stateid=%d:0x%08x dstaddr=%s", "stateid=%d:0x%08x dstaddr=%s",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1422,7 +1471,7 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event,
TP_ARGS(name, len, id, error), TP_ARGS(name, len, id, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(u32, id) __field(u32, id)
__dynamic_array(char, name, len > 0 ? len + 1 : 1) __dynamic_array(char, name, len > 0 ? len + 1 : 1)
), ),
@ -1437,8 +1486,8 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event,
), ),
TP_printk( TP_printk(
"error=%d id=%u name=%s", "error=%ld (%s) id=%u name=%s",
__entry->error, -__entry->error, show_nfsv4_errors(__entry->error),
__entry->id, __entry->id,
__get_str(name) __get_str(name)
) )
@ -1471,7 +1520,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
__field(u64, fileid) __field(u64, fileid)
__field(loff_t, offset) __field(loff_t, offset)
__field(size_t, count) __field(size_t, count)
__field(int, error) __field(unsigned long, error)
__field(int, stateid_seq) __field(int, stateid_seq)
__field(u32, stateid_hash) __field(u32, stateid_hash)
), ),
@ -1485,7 +1534,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->offset = hdr->args.offset; __entry->offset = hdr->args.offset;
__entry->count = hdr->args.count; __entry->count = hdr->args.count;
__entry->error = error; __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq = __entry->stateid_seq =
be32_to_cpu(state->stateid.seqid); be32_to_cpu(state->stateid.seqid);
__entry->stateid_hash = __entry->stateid_hash =
@ -1493,9 +1542,9 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"offset=%lld count=%zu stateid=%d:0x%08x", "offset=%lld count=%zu stateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1531,7 +1580,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
__field(u64, fileid) __field(u64, fileid)
__field(loff_t, offset) __field(loff_t, offset)
__field(size_t, count) __field(size_t, count)
__field(int, error) __field(unsigned long, error)
__field(int, stateid_seq) __field(int, stateid_seq)
__field(u32, stateid_hash) __field(u32, stateid_hash)
), ),
@ -1545,7 +1594,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->offset = hdr->args.offset; __entry->offset = hdr->args.offset;
__entry->count = hdr->args.count; __entry->count = hdr->args.count;
__entry->error = error; __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq = __entry->stateid_seq =
be32_to_cpu(state->stateid.seqid); be32_to_cpu(state->stateid.seqid);
__entry->stateid_hash = __entry->stateid_hash =
@ -1553,9 +1602,9 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"offset=%lld count=%zu stateid=%d:0x%08x", "offset=%lld count=%zu stateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1592,7 +1641,7 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
__field(u64, fileid) __field(u64, fileid)
__field(loff_t, offset) __field(loff_t, offset)
__field(size_t, count) __field(size_t, count)
__field(int, error) __field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
@ -1606,9 +1655,9 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"offset=%lld count=%zu", "offset=%lld count=%zu",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
@ -1656,7 +1705,7 @@ TRACE_EVENT(nfs4_layoutget,
__field(u32, iomode) __field(u32, iomode)
__field(u64, offset) __field(u64, offset)
__field(u64, count) __field(u64, count)
__field(int, error) __field(unsigned long, error)
__field(int, stateid_seq) __field(int, stateid_seq)
__field(u32, stateid_hash) __field(u32, stateid_hash)
__field(int, layoutstateid_seq) __field(int, layoutstateid_seq)
@ -1689,10 +1738,10 @@ TRACE_EVENT(nfs4_layoutget,
), ),
TP_printk( TP_printk(
"error=%d (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"iomode=%s offset=%llu count=%llu stateid=%d:0x%08x " "iomode=%s offset=%llu count=%llu stateid=%d:0x%08x "
"layoutstateid=%d:0x%08x", "layoutstateid=%d:0x%08x",
__entry->error, -__entry->error,
show_nfsv4_errors(__entry->error), show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,

View File

@ -3187,7 +3187,7 @@ static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
return true; return true;
out_status: out_status:
nfserr = be32_to_cpup(p); nfserr = be32_to_cpup(p);
trace_nfs4_xdr_status(opnum, nfserr); trace_nfs4_xdr_status(xdr, opnum, nfserr);
*nfs_retval = nfs4_stat_to_errno(nfserr); *nfs_retval = nfs4_stat_to_errno(nfserr);
return true; return true;
out_bad_operation: out_bad_operation:

View File

@ -11,6 +11,16 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include <linux/iversion.h> #include <linux/iversion.h>
TRACE_DEFINE_ENUM(DT_UNKNOWN);
TRACE_DEFINE_ENUM(DT_FIFO);
TRACE_DEFINE_ENUM(DT_CHR);
TRACE_DEFINE_ENUM(DT_DIR);
TRACE_DEFINE_ENUM(DT_BLK);
TRACE_DEFINE_ENUM(DT_REG);
TRACE_DEFINE_ENUM(DT_LNK);
TRACE_DEFINE_ENUM(DT_SOCK);
TRACE_DEFINE_ENUM(DT_WHT);
#define nfs_show_file_type(ftype) \ #define nfs_show_file_type(ftype) \
__print_symbolic(ftype, \ __print_symbolic(ftype, \
{ DT_UNKNOWN, "UNKNOWN" }, \ { DT_UNKNOWN, "UNKNOWN" }, \
@ -23,25 +33,57 @@
{ DT_SOCK, "SOCK" }, \ { DT_SOCK, "SOCK" }, \
{ DT_WHT, "WHT" }) { DT_WHT, "WHT" })
TRACE_DEFINE_ENUM(NFS_INO_INVALID_DATA);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_ATIME);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_ACCESS);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_ACL);
TRACE_DEFINE_ENUM(NFS_INO_REVAL_PAGECACHE);
TRACE_DEFINE_ENUM(NFS_INO_REVAL_FORCED);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_LABEL);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_CHANGE);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_CTIME);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_MTIME);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_SIZE);
TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER);
#define nfs_show_cache_validity(v) \ #define nfs_show_cache_validity(v) \
__print_flags(v, "|", \ __print_flags(v, "|", \
{ NFS_INO_INVALID_ATTR, "INVALID_ATTR" }, \
{ NFS_INO_INVALID_DATA, "INVALID_DATA" }, \ { NFS_INO_INVALID_DATA, "INVALID_DATA" }, \
{ NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \ { NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \
{ NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \ { NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \
{ NFS_INO_INVALID_ACL, "INVALID_ACL" }, \ { NFS_INO_INVALID_ACL, "INVALID_ACL" }, \
{ NFS_INO_REVAL_PAGECACHE, "REVAL_PAGECACHE" }, \ { NFS_INO_REVAL_PAGECACHE, "REVAL_PAGECACHE" }, \
{ NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \ { NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \
{ NFS_INO_INVALID_LABEL, "INVALID_LABEL" }) { NFS_INO_INVALID_LABEL, "INVALID_LABEL" }, \
{ NFS_INO_INVALID_CHANGE, "INVALID_CHANGE" }, \
{ NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \
{ NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \
{ NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \
{ NFS_INO_INVALID_OTHER, "INVALID_OTHER" })
TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS);
TRACE_DEFINE_ENUM(NFS_INO_STALE);
TRACE_DEFINE_ENUM(NFS_INO_ACL_LRU_SET);
TRACE_DEFINE_ENUM(NFS_INO_INVALIDATING);
TRACE_DEFINE_ENUM(NFS_INO_FSCACHE);
TRACE_DEFINE_ENUM(NFS_INO_FSCACHE_LOCK);
TRACE_DEFINE_ENUM(NFS_INO_LAYOUTCOMMIT);
TRACE_DEFINE_ENUM(NFS_INO_LAYOUTCOMMITTING);
TRACE_DEFINE_ENUM(NFS_INO_LAYOUTSTATS);
TRACE_DEFINE_ENUM(NFS_INO_ODIRECT);
#define nfs_show_nfsi_flags(v) \ #define nfs_show_nfsi_flags(v) \
__print_flags(v, "|", \ __print_flags(v, "|", \
{ 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \ { BIT(NFS_INO_ADVISE_RDPLUS), "ADVISE_RDPLUS" }, \
{ 1 << NFS_INO_STALE, "STALE" }, \ { BIT(NFS_INO_STALE), "STALE" }, \
{ 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \ { BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \
{ 1 << NFS_INO_FSCACHE, "FSCACHE" }, \ { BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \
{ 1 << NFS_INO_LAYOUTCOMMIT, "NEED_LAYOUTCOMMIT" }, \ { BIT(NFS_INO_FSCACHE), "FSCACHE" }, \
{ 1 << NFS_INO_LAYOUTCOMMITTING, "LAYOUTCOMMIT" }) { BIT(NFS_INO_FSCACHE_LOCK), "FSCACHE_LOCK" }, \
{ BIT(NFS_INO_LAYOUTCOMMIT), "NEED_LAYOUTCOMMIT" }, \
{ BIT(NFS_INO_LAYOUTCOMMITTING), "LAYOUTCOMMIT" }, \
{ BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \
{ BIT(NFS_INO_ODIRECT), "ODIRECT" })
DECLARE_EVENT_CLASS(nfs_inode_event, DECLARE_EVENT_CLASS(nfs_inode_event,
TP_PROTO( TP_PROTO(
@ -83,7 +125,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
TP_ARGS(inode, error), TP_ARGS(inode, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(dev_t, dev) __field(dev_t, dev)
__field(u32, fhandle) __field(u32, fhandle)
__field(unsigned char, type) __field(unsigned char, type)
@ -96,7 +138,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
TP_fast_assign( TP_fast_assign(
const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_inode *nfsi = NFS_I(inode);
__entry->error = error; __entry->error = error < 0 ? -error : 0;
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid; __entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
@ -108,10 +150,10 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
), ),
TP_printk( TP_printk(
"error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
"type=%u (%s) version=%llu size=%lld " "type=%u (%s) version=%llu size=%lld "
"cache_validity=%lu (%s) nfs_flags=%ld (%s)", "cache_validity=0x%lx (%s) nfs_flags=0x%lx (%s)",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid, (unsigned long long)__entry->fileid,
__entry->fhandle, __entry->fhandle,
@ -158,13 +200,41 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
DEFINE_NFS_INODE_EVENT(nfs_access_enter); DEFINE_NFS_INODE_EVENT(nfs_access_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_access_exit); DEFINE_NFS_INODE_EVENT_DONE(nfs_access_exit);
TRACE_DEFINE_ENUM(LOOKUP_FOLLOW);
TRACE_DEFINE_ENUM(LOOKUP_DIRECTORY);
TRACE_DEFINE_ENUM(LOOKUP_AUTOMOUNT);
TRACE_DEFINE_ENUM(LOOKUP_PARENT);
TRACE_DEFINE_ENUM(LOOKUP_REVAL);
TRACE_DEFINE_ENUM(LOOKUP_RCU);
TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
TRACE_DEFINE_ENUM(LOOKUP_NO_EVAL);
TRACE_DEFINE_ENUM(LOOKUP_OPEN);
TRACE_DEFINE_ENUM(LOOKUP_CREATE);
TRACE_DEFINE_ENUM(LOOKUP_EXCL);
TRACE_DEFINE_ENUM(LOOKUP_RENAME_TARGET);
TRACE_DEFINE_ENUM(LOOKUP_JUMPED);
TRACE_DEFINE_ENUM(LOOKUP_ROOT);
TRACE_DEFINE_ENUM(LOOKUP_EMPTY);
TRACE_DEFINE_ENUM(LOOKUP_DOWN);
#define show_lookup_flags(flags) \ #define show_lookup_flags(flags) \
__print_flags((unsigned long)flags, "|", \ __print_flags(flags, "|", \
{ LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \ { LOOKUP_FOLLOW, "FOLLOW" }, \
{ LOOKUP_DIRECTORY, "DIRECTORY" }, \ { LOOKUP_DIRECTORY, "DIRECTORY" }, \
{ LOOKUP_AUTOMOUNT, "AUTOMOUNT" }, \
{ LOOKUP_PARENT, "PARENT" }, \
{ LOOKUP_REVAL, "REVAL" }, \
{ LOOKUP_RCU, "RCU" }, \
{ LOOKUP_NO_REVAL, "NO_REVAL" }, \
{ LOOKUP_NO_EVAL, "NO_EVAL" }, \
{ LOOKUP_OPEN, "OPEN" }, \ { LOOKUP_OPEN, "OPEN" }, \
{ LOOKUP_CREATE, "CREATE" }, \ { LOOKUP_CREATE, "CREATE" }, \
{ LOOKUP_EXCL, "EXCL" }) { LOOKUP_EXCL, "EXCL" }, \
{ LOOKUP_RENAME_TARGET, "RENAME_TARGET" }, \
{ LOOKUP_JUMPED, "JUMPED" }, \
{ LOOKUP_ROOT, "ROOT" }, \
{ LOOKUP_EMPTY, "EMPTY" }, \
{ LOOKUP_DOWN, "DOWN" })
DECLARE_EVENT_CLASS(nfs_lookup_event, DECLARE_EVENT_CLASS(nfs_lookup_event,
TP_PROTO( TP_PROTO(
@ -176,7 +246,7 @@ DECLARE_EVENT_CLASS(nfs_lookup_event,
TP_ARGS(dir, dentry, flags), TP_ARGS(dir, dentry, flags),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, flags) __field(unsigned long, flags)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
__string(name, dentry->d_name.name) __string(name, dentry->d_name.name)
@ -190,7 +260,7 @@ DECLARE_EVENT_CLASS(nfs_lookup_event,
), ),
TP_printk( TP_printk(
"flags=%u (%s) name=%02x:%02x:%llu/%s", "flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
__entry->flags, __entry->flags,
show_lookup_flags(__entry->flags), show_lookup_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
@ -219,8 +289,8 @@ DECLARE_EVENT_CLASS(nfs_lookup_event_done,
TP_ARGS(dir, dentry, flags, error), TP_ARGS(dir, dentry, flags, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(unsigned int, flags) __field(unsigned long, flags)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
__string(name, dentry->d_name.name) __string(name, dentry->d_name.name)
@ -229,14 +299,14 @@ DECLARE_EVENT_CLASS(nfs_lookup_event_done,
TP_fast_assign( TP_fast_assign(
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->error = error; __entry->error = error < 0 ? -error : 0;
__entry->flags = flags; __entry->flags = flags;
__assign_str(name, dentry->d_name.name); __assign_str(name, dentry->d_name.name);
), ),
TP_printk( TP_printk(
"error=%d flags=%u (%s) name=%02x:%02x:%llu/%s", "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
__entry->flags, __entry->flags,
show_lookup_flags(__entry->flags), show_lookup_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
@ -260,15 +330,43 @@ DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_exit);
DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_revalidate_enter); DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_revalidate_enter);
DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_revalidate_exit); DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_revalidate_exit);
TRACE_DEFINE_ENUM(O_WRONLY);
TRACE_DEFINE_ENUM(O_RDWR);
TRACE_DEFINE_ENUM(O_CREAT);
TRACE_DEFINE_ENUM(O_EXCL);
TRACE_DEFINE_ENUM(O_NOCTTY);
TRACE_DEFINE_ENUM(O_TRUNC);
TRACE_DEFINE_ENUM(O_APPEND);
TRACE_DEFINE_ENUM(O_NONBLOCK);
TRACE_DEFINE_ENUM(O_DSYNC);
TRACE_DEFINE_ENUM(O_DIRECT);
TRACE_DEFINE_ENUM(O_LARGEFILE);
TRACE_DEFINE_ENUM(O_DIRECTORY);
TRACE_DEFINE_ENUM(O_NOFOLLOW);
TRACE_DEFINE_ENUM(O_NOATIME);
TRACE_DEFINE_ENUM(O_CLOEXEC);
#define show_open_flags(flags) \ #define show_open_flags(flags) \
__print_flags((unsigned long)flags, "|", \ __print_flags(flags, "|", \
{ O_WRONLY, "O_WRONLY" }, \
{ O_RDWR, "O_RDWR" }, \
{ O_CREAT, "O_CREAT" }, \ { O_CREAT, "O_CREAT" }, \
{ O_EXCL, "O_EXCL" }, \ { O_EXCL, "O_EXCL" }, \
{ O_NOCTTY, "O_NOCTTY" }, \
{ O_TRUNC, "O_TRUNC" }, \ { O_TRUNC, "O_TRUNC" }, \
{ O_APPEND, "O_APPEND" }, \ { O_APPEND, "O_APPEND" }, \
{ O_NONBLOCK, "O_NONBLOCK" }, \
{ O_DSYNC, "O_DSYNC" }, \ { O_DSYNC, "O_DSYNC" }, \
{ O_DIRECT, "O_DIRECT" }, \ { O_DIRECT, "O_DIRECT" }, \
{ O_DIRECTORY, "O_DIRECTORY" }) { O_LARGEFILE, "O_LARGEFILE" }, \
{ O_DIRECTORY, "O_DIRECTORY" }, \
{ O_NOFOLLOW, "O_NOFOLLOW" }, \
{ O_NOATIME, "O_NOATIME" }, \
{ O_CLOEXEC, "O_CLOEXEC" })
TRACE_DEFINE_ENUM(FMODE_READ);
TRACE_DEFINE_ENUM(FMODE_WRITE);
TRACE_DEFINE_ENUM(FMODE_EXEC);
#define show_fmode_flags(mode) \ #define show_fmode_flags(mode) \
__print_flags(mode, "|", \ __print_flags(mode, "|", \
@ -286,7 +384,7 @@ TRACE_EVENT(nfs_atomic_open_enter,
TP_ARGS(dir, ctx, flags), TP_ARGS(dir, ctx, flags),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, flags) __field(unsigned long, flags)
__field(unsigned int, fmode) __field(unsigned int, fmode)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
@ -302,7 +400,7 @@ TRACE_EVENT(nfs_atomic_open_enter,
), ),
TP_printk( TP_printk(
"flags=%u (%s) fmode=%s name=%02x:%02x:%llu/%s", "flags=0x%lx (%s) fmode=%s name=%02x:%02x:%llu/%s",
__entry->flags, __entry->flags,
show_open_flags(__entry->flags), show_open_flags(__entry->flags),
show_fmode_flags(__entry->fmode), show_fmode_flags(__entry->fmode),
@ -323,8 +421,8 @@ TRACE_EVENT(nfs_atomic_open_exit,
TP_ARGS(dir, ctx, flags, error), TP_ARGS(dir, ctx, flags, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(unsigned int, flags) __field(unsigned long, flags)
__field(unsigned int, fmode) __field(unsigned int, fmode)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
@ -332,7 +430,7 @@ TRACE_EVENT(nfs_atomic_open_exit,
), ),
TP_fast_assign( TP_fast_assign(
__entry->error = error; __entry->error = -error;
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->flags = flags; __entry->flags = flags;
@ -341,9 +439,9 @@ TRACE_EVENT(nfs_atomic_open_exit,
), ),
TP_printk( TP_printk(
"error=%d flags=%u (%s) fmode=%s " "error=%ld (%s) flags=0x%lx (%s) fmode=%s "
"name=%02x:%02x:%llu/%s", "name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
__entry->flags, __entry->flags,
show_open_flags(__entry->flags), show_open_flags(__entry->flags),
show_fmode_flags(__entry->fmode), show_fmode_flags(__entry->fmode),
@ -363,7 +461,7 @@ TRACE_EVENT(nfs_create_enter,
TP_ARGS(dir, dentry, flags), TP_ARGS(dir, dentry, flags),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, flags) __field(unsigned long, flags)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
__string(name, dentry->d_name.name) __string(name, dentry->d_name.name)
@ -377,7 +475,7 @@ TRACE_EVENT(nfs_create_enter,
), ),
TP_printk( TP_printk(
"flags=%u (%s) name=%02x:%02x:%llu/%s", "flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
__entry->flags, __entry->flags,
show_open_flags(__entry->flags), show_open_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
@ -397,15 +495,15 @@ TRACE_EVENT(nfs_create_exit,
TP_ARGS(dir, dentry, flags, error), TP_ARGS(dir, dentry, flags, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(unsigned int, flags) __field(unsigned long, flags)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
__string(name, dentry->d_name.name) __string(name, dentry->d_name.name)
), ),
TP_fast_assign( TP_fast_assign(
__entry->error = error; __entry->error = -error;
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->flags = flags; __entry->flags = flags;
@ -413,8 +511,8 @@ TRACE_EVENT(nfs_create_exit,
), ),
TP_printk( TP_printk(
"error=%d flags=%u (%s) name=%02x:%02x:%llu/%s", "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
__entry->flags, __entry->flags,
show_open_flags(__entry->flags), show_open_flags(__entry->flags),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
@ -469,7 +567,7 @@ DECLARE_EVENT_CLASS(nfs_directory_event_done,
TP_ARGS(dir, dentry, error), TP_ARGS(dir, dentry, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, dir) __field(u64, dir)
__string(name, dentry->d_name.name) __string(name, dentry->d_name.name)
@ -478,13 +576,13 @@ DECLARE_EVENT_CLASS(nfs_directory_event_done,
TP_fast_assign( TP_fast_assign(
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->error = error; __entry->error = error < 0 ? -error : 0;
__assign_str(name, dentry->d_name.name); __assign_str(name, dentry->d_name.name);
), ),
TP_printk( TP_printk(
"error=%d name=%02x:%02x:%llu/%s", "error=%ld (%s) name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir, (unsigned long long)__entry->dir,
__get_str(name) __get_str(name)
@ -557,7 +655,7 @@ TRACE_EVENT(nfs_link_exit,
TP_ARGS(inode, dir, dentry, error), TP_ARGS(inode, dir, dentry, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned long, error)
__field(dev_t, dev) __field(dev_t, dev)
__field(u64, fileid) __field(u64, fileid)
__field(u64, dir) __field(u64, dir)
@ -568,13 +666,13 @@ TRACE_EVENT(nfs_link_exit,
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode); __entry->fileid = NFS_FILEID(inode);
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->error = error; __entry->error = error < 0 ? -error : 0;
__assign_str(name, dentry->d_name.name); __assign_str(name, dentry->d_name.name);
), ),
TP_printk( TP_printk(
"error=%d fileid=%02x:%02x:%llu name=%02x:%02x:%llu/%s", "error=%ld (%s) fileid=%02x:%02x:%llu name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->fileid, __entry->fileid,
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
@ -642,7 +740,7 @@ DECLARE_EVENT_CLASS(nfs_rename_event_done,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(int, error) __field(unsigned long, error)
__field(u64, old_dir) __field(u64, old_dir)
__string(old_name, old_dentry->d_name.name) __string(old_name, old_dentry->d_name.name)
__field(u64, new_dir) __field(u64, new_dir)
@ -651,17 +749,17 @@ DECLARE_EVENT_CLASS(nfs_rename_event_done,
TP_fast_assign( TP_fast_assign(
__entry->dev = old_dir->i_sb->s_dev; __entry->dev = old_dir->i_sb->s_dev;
__entry->error = -error;
__entry->old_dir = NFS_FILEID(old_dir); __entry->old_dir = NFS_FILEID(old_dir);
__entry->new_dir = NFS_FILEID(new_dir); __entry->new_dir = NFS_FILEID(new_dir);
__entry->error = error;
__assign_str(old_name, old_dentry->d_name.name); __assign_str(old_name, old_dentry->d_name.name);
__assign_str(new_name, new_dentry->d_name.name); __assign_str(new_name, new_dentry->d_name.name);
), ),
TP_printk( TP_printk(
"error=%d old_name=%02x:%02x:%llu/%s " "error=%ld (%s) old_name=%02x:%02x:%llu/%s "
"new_name=%02x:%02x:%llu/%s", "new_name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->old_dir, (unsigned long long)__entry->old_dir,
__get_str(old_name), __get_str(old_name),
@ -697,7 +795,7 @@ TRACE_EVENT(nfs_sillyrename_unlink,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(int, error) __field(unsigned long, error)
__field(u64, dir) __field(u64, dir)
__dynamic_array(char, name, data->args.name.len + 1) __dynamic_array(char, name, data->args.name.len + 1)
), ),
@ -707,15 +805,15 @@ TRACE_EVENT(nfs_sillyrename_unlink,
size_t len = data->args.name.len; size_t len = data->args.name.len;
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->error = error; __entry->error = -error;
memcpy(__get_str(name), memcpy(__get_str(name),
data->args.name.name, len); data->args.name.name, len);
__get_str(name)[len] = 0; __get_str(name)[len] = 0;
), ),
TP_printk( TP_printk(
"error=%d name=%02x:%02x:%llu/%s", "error=%ld (%s) name=%02x:%02x:%llu/%s",
__entry->error, -__entry->error, nfs_show_status(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->dir, (unsigned long long)__entry->dir,
__get_str(name) __get_str(name)
@ -974,6 +1072,8 @@ TRACE_DEFINE_ENUM(NFSERR_PERM);
TRACE_DEFINE_ENUM(NFSERR_NOENT); TRACE_DEFINE_ENUM(NFSERR_NOENT);
TRACE_DEFINE_ENUM(NFSERR_IO); TRACE_DEFINE_ENUM(NFSERR_IO);
TRACE_DEFINE_ENUM(NFSERR_NXIO); TRACE_DEFINE_ENUM(NFSERR_NXIO);
TRACE_DEFINE_ENUM(ECHILD);
TRACE_DEFINE_ENUM(NFSERR_EAGAIN);
TRACE_DEFINE_ENUM(NFSERR_ACCES); TRACE_DEFINE_ENUM(NFSERR_ACCES);
TRACE_DEFINE_ENUM(NFSERR_EXIST); TRACE_DEFINE_ENUM(NFSERR_EXIST);
TRACE_DEFINE_ENUM(NFSERR_XDEV); TRACE_DEFINE_ENUM(NFSERR_XDEV);
@ -985,6 +1085,7 @@ TRACE_DEFINE_ENUM(NFSERR_FBIG);
TRACE_DEFINE_ENUM(NFSERR_NOSPC); TRACE_DEFINE_ENUM(NFSERR_NOSPC);
TRACE_DEFINE_ENUM(NFSERR_ROFS); TRACE_DEFINE_ENUM(NFSERR_ROFS);
TRACE_DEFINE_ENUM(NFSERR_MLINK); TRACE_DEFINE_ENUM(NFSERR_MLINK);
TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG); TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY); TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
TRACE_DEFINE_ENUM(NFSERR_DQUOT); TRACE_DEFINE_ENUM(NFSERR_DQUOT);
@ -1007,6 +1108,8 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NOENT, "NOENT" }, \ { NFSERR_NOENT, "NOENT" }, \
{ NFSERR_IO, "IO" }, \ { NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \ { NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
{ NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \ { NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \ { NFSERR_EXIST, "EXIST" }, \
{ NFSERR_XDEV, "XDEV" }, \ { NFSERR_XDEV, "XDEV" }, \
@ -1018,6 +1121,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NOSPC, "NOSPC" }, \ { NFSERR_NOSPC, "NOSPC" }, \
{ NFSERR_ROFS, "ROFS" }, \ { NFSERR_ROFS, "ROFS" }, \
{ NFSERR_MLINK, "MLINK" }, \ { NFSERR_MLINK, "MLINK" }, \
{ NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
{ NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \ { NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
{ NFSERR_NOTEMPTY, "NOTEMPTY" }, \ { NFSERR_NOTEMPTY, "NOTEMPTY" }, \
{ NFSERR_DQUOT, "DQUOT" }, \ { NFSERR_DQUOT, "DQUOT" }, \
@ -1035,22 +1139,33 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
TRACE_EVENT(nfs_xdr_status, TRACE_EVENT(nfs_xdr_status,
TP_PROTO( TP_PROTO(
const struct xdr_stream *xdr,
int error int error
), ),
TP_ARGS(error), TP_ARGS(xdr, error),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, error) __field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(unsigned long, error)
), ),
TP_fast_assign( TP_fast_assign(
const struct rpc_rqst *rqstp = xdr->rqst;
const struct rpc_task *task = rqstp->rq_task;
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->error = error; __entry->error = error;
), ),
TP_printk( TP_printk(
"error=%d (%s)", "task:%u@%d xid=0x%08x error=%ld (%s)",
__entry->error, nfs_show_status(__entry->error) __entry->task_id, __entry->client_id, __entry->xid,
-__entry->error, nfs_show_status(__entry->error)
) )
); );

View File

@ -335,6 +335,9 @@ struct xprt_class {
*/ */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args); struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
void xprt_connect(struct rpc_task *task); void xprt_connect(struct rpc_task *task);
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt);
void xprt_reconnect_backoff(struct rpc_xprt *xprt,
unsigned long init_to);
void xprt_reserve(struct rpc_task *task); void xprt_reserve(struct rpc_task *task);
void xprt_retry_reserve(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);

View File

@ -181,18 +181,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
), \ ), \
TP_ARGS(task, mr, nsegs)) TP_ARGS(task, mr, nsegs))
TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
#define xprtrdma_show_frwr_state(x) \
__print_symbolic(x, \
{ FRWR_IS_INVALID, "INVALID" }, \
{ FRWR_IS_VALID, "VALID" }, \
{ FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
{ FRWR_FLUSHED_LI, "FLUSHED_LI" })
DECLARE_EVENT_CLASS(xprtrdma_frwr_done, DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
TP_PROTO( TP_PROTO(
const struct ib_wc *wc, const struct ib_wc *wc,
@ -203,22 +191,19 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(const void *, mr) __field(const void *, mr)
__field(unsigned int, state)
__field(unsigned int, status) __field(unsigned int, status)
__field(unsigned int, vendor_err) __field(unsigned int, vendor_err)
), ),
TP_fast_assign( TP_fast_assign(
__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr); __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
__entry->state = frwr->fr_state;
__entry->status = wc->status; __entry->status = wc->status;
__entry->vendor_err = __entry->status ? wc->vendor_err : 0; __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
), ),
TP_printk( TP_printk(
"mr=%p state=%s: %s (%u/0x%x)", "mr=%p: %s (%u/0x%x)",
__entry->mr, xprtrdma_show_frwr_state(__entry->state), __entry->mr, rdma_show_wc_status(__entry->status),
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err __entry->status, __entry->vendor_err
) )
); );
@ -390,6 +375,37 @@ DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close); DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect); DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
TRACE_EVENT(xprtrdma_op_set_cto,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
unsigned long connect,
unsigned long reconnect
),
TP_ARGS(r_xprt, connect, reconnect),
TP_STRUCT__entry(
__field(const void *, r_xprt)
__field(unsigned long, connect)
__field(unsigned long, reconnect)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
__entry->r_xprt = r_xprt;
__entry->connect = connect;
__entry->reconnect = reconnect;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
__get_str(addr), __get_str(port), __entry->r_xprt,
__entry->connect / HZ, __entry->reconnect / HZ
)
);
TRACE_EVENT(xprtrdma_qp_event, TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO( TP_PROTO(
const struct rpcrdma_xprt *r_xprt, const struct rpcrdma_xprt *r_xprt,
@ -470,13 +486,12 @@ TRACE_DEFINE_ENUM(rpcrdma_replych);
TRACE_EVENT(xprtrdma_marshal, TRACE_EVENT(xprtrdma_marshal,
TP_PROTO( TP_PROTO(
const struct rpc_rqst *rqst, const struct rpcrdma_req *req,
unsigned int hdrlen,
unsigned int rtype, unsigned int rtype,
unsigned int wtype unsigned int wtype
), ),
TP_ARGS(rqst, hdrlen, rtype, wtype), TP_ARGS(req, rtype, wtype),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, task_id) __field(unsigned int, task_id)
@ -491,10 +506,12 @@ TRACE_EVENT(xprtrdma_marshal,
), ),
TP_fast_assign( TP_fast_assign(
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->task_id = rqst->rq_task->tk_pid; __entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid; __entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid); __entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->hdrlen = hdrlen; __entry->hdrlen = req->rl_hdrbuf.len;
__entry->headlen = rqst->rq_snd_buf.head[0].iov_len; __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
__entry->pagelen = rqst->rq_snd_buf.page_len; __entry->pagelen = rqst->rq_snd_buf.page_len;
__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
@ -538,6 +555,33 @@ TRACE_EVENT(xprtrdma_marshal_failed,
) )
); );
TRACE_EVENT(xprtrdma_prepsend_failed,
TP_PROTO(const struct rpc_rqst *rqst,
int ret
),
TP_ARGS(rqst, ret),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
__field(int, ret)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->ret = ret;
),
TP_printk("task:%u@%u xid=0x%08x: ret=%d",
__entry->task_id, __entry->client_id, __entry->xid,
__entry->ret
)
);
TRACE_EVENT(xprtrdma_post_send, TRACE_EVENT(xprtrdma_post_send,
TP_PROTO( TP_PROTO(
const struct rpcrdma_req *req, const struct rpcrdma_req *req,
@ -559,7 +603,8 @@ TRACE_EVENT(xprtrdma_post_send,
const struct rpc_rqst *rqst = &req->rl_slot; const struct rpc_rqst *rqst = &req->rl_slot;
__entry->task_id = rqst->rq_task->tk_pid; __entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid; __entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->req = req; __entry->req = req;
__entry->num_sge = req->rl_sendctx->sc_wr.num_sge; __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
__entry->signaled = req->rl_sendctx->sc_wr.send_flags & __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
@ -698,6 +743,7 @@ TRACE_EVENT(xprtrdma_wc_receive,
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
TRACE_EVENT(xprtrdma_frwr_alloc, TRACE_EVENT(xprtrdma_frwr_alloc,
TP_PROTO( TP_PROTO(

View File

@ -59,6 +59,7 @@ static struct rpc_wait_queue delay_queue;
*/ */
struct workqueue_struct *rpciod_workqueue __read_mostly; struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly; struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);
unsigned long unsigned long
rpc_task_timeout(const struct rpc_task *task) rpc_task_timeout(const struct rpc_task *task)

View File

@ -846,6 +846,38 @@ void xprt_connect(struct rpc_task *task)
xprt_release_write(xprt, task); xprt_release_write(xprt, task);
} }
/**
* xprt_reconnect_delay - compute the wait before scheduling a connect
* @xprt: transport instance
*
*/
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
unsigned long start, now = jiffies;
start = xprt->stat.connect_start + xprt->reestablish_timeout;
if (time_after(start, now))
return start - now;
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
/**
* xprt_reconnect_backoff - compute the new re-establish timeout
* @xprt: transport instance
* @init_to: initial reestablish timeout
*
*/
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
xprt->reestablish_timeout <<= 1;
if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
xprt->reestablish_timeout = xprt->max_reconnect_timeout;
if (xprt->reestablish_timeout < init_to)
xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
enum xprt_xid_rb_cmp { enum xprt_xid_rb_cmp {
XID_RB_EQUAL, XID_RB_EQUAL,
XID_RB_LEFT, XID_RB_LEFT,

View File

@ -144,6 +144,26 @@ frwr_mr_recycle_worker(struct work_struct *work)
frwr_release_mr(mr); frwr_release_mr(mr);
} }
/* frwr_reset - Place MRs back on the free list
* @req: request to reset
*
* Used after a failed marshal. For FRWR, this means the MRs
* don't have to be fully released and recreated.
*
* NB: This is safe only as long as none of @req's MRs are
* involved with an ongoing asynchronous FAST_REG or LOCAL_INV
* Work Request.
*/
void frwr_reset(struct rpcrdma_req *req)
{
while (!list_empty(&req->rl_registered)) {
struct rpcrdma_mr *mr;
mr = rpcrdma_mr_pop(&req->rl_registered);
rpcrdma_mr_unmap_and_put(mr);
}
}
/** /**
* frwr_init_mr - Initialize one MR * frwr_init_mr - Initialize one MR
* @ia: interface adapter * @ia: interface adapter
@ -168,7 +188,6 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
goto out_list_err; goto out_list_err;
mr->frwr.fr_mr = frmr; mr->frwr.fr_mr = frmr;
mr->frwr.fr_state = FRWR_IS_INVALID;
mr->mr_dir = DMA_NONE; mr->mr_dir = DMA_NONE;
INIT_LIST_HEAD(&mr->mr_list); INIT_LIST_HEAD(&mr->mr_list);
INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker); INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
@ -297,65 +316,6 @@ size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
(ia->ri_max_segs - 2) * ia->ri_max_frwr_depth); (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
} }
/**
* frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
*/
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_frwr *frwr =
container_of(cqe, struct rpcrdma_frwr, fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS)
frwr->fr_state = FRWR_FLUSHED_FR;
trace_xprtrdma_wc_fastreg(wc, frwr);
}
/**
* frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
*/
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS)
frwr->fr_state = FRWR_FLUSHED_LI;
trace_xprtrdma_wc_li(wc, frwr);
}
/**
* frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
* Awaken anyone waiting for an MR to finish being fenced.
*/
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS)
frwr->fr_state = FRWR_FLUSHED_LI;
trace_xprtrdma_wc_li_wake(wc, frwr);
complete(&frwr->fr_linv_done);
}
/** /**
* frwr_map - Register a memory region * frwr_map - Register a memory region
* @r_xprt: controlling transport * @r_xprt: controlling transport
@ -378,23 +338,15 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
{ {
struct rpcrdma_ia *ia = &r_xprt->rx_ia; struct rpcrdma_ia *ia = &r_xprt->rx_ia;
bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS; bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
struct rpcrdma_frwr *frwr;
struct rpcrdma_mr *mr; struct rpcrdma_mr *mr;
struct ib_mr *ibmr; struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr; struct ib_reg_wr *reg_wr;
int i, n; int i, n;
u8 key; u8 key;
mr = NULL;
do {
if (mr)
rpcrdma_mr_recycle(mr);
mr = rpcrdma_mr_get(r_xprt); mr = rpcrdma_mr_get(r_xprt);
if (!mr) if (!mr)
return ERR_PTR(-EAGAIN); goto out_getmr_err;
} while (mr->frwr.fr_state != FRWR_IS_INVALID);
frwr = &mr->frwr;
frwr->fr_state = FRWR_IS_VALID;
if (nsegs > ia->ri_max_frwr_depth) if (nsegs > ia->ri_max_frwr_depth)
nsegs = ia->ri_max_frwr_depth; nsegs = ia->ri_max_frwr_depth;
@ -423,7 +375,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
if (!mr->mr_nents) if (!mr->mr_nents)
goto out_dmamap_err; goto out_dmamap_err;
ibmr = frwr->fr_mr; ibmr = mr->frwr.fr_mr;
n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE); n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
if (unlikely(n != mr->mr_nents)) if (unlikely(n != mr->mr_nents))
goto out_mapmr_err; goto out_mapmr_err;
@ -433,7 +385,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
key = (u8)(ibmr->rkey & 0x000000FF); key = (u8)(ibmr->rkey & 0x000000FF);
ib_update_fast_reg_key(ibmr, ++key); ib_update_fast_reg_key(ibmr, ++key);
reg_wr = &frwr->fr_regwr; reg_wr = &mr->frwr.fr_regwr;
reg_wr->mr = ibmr; reg_wr->mr = ibmr;
reg_wr->key = ibmr->rkey; reg_wr->key = ibmr->rkey;
reg_wr->access = writing ? reg_wr->access = writing ?
@ -448,6 +400,10 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
*out = mr; *out = mr;
return seg; return seg;
out_getmr_err:
xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
return ERR_PTR(-EAGAIN);
out_dmamap_err: out_dmamap_err:
mr->mr_dir = DMA_NONE; mr->mr_dir = DMA_NONE;
trace_xprtrdma_frwr_sgerr(mr, i); trace_xprtrdma_frwr_sgerr(mr, i);
@ -460,6 +416,23 @@ out_mapmr_err:
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
/**
* frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
*/
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_frwr *frwr =
container_of(cqe, struct rpcrdma_frwr, fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_fastreg(wc, frwr);
/* The MR will get recycled when the associated req is retransmitted */
}
/** /**
* frwr_send - post Send WR containing the RPC Call message * frwr_send - post Send WR containing the RPC Call message
* @ia: interface adapter * @ia: interface adapter
@ -512,31 +485,75 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
if (mr->mr_handle == rep->rr_inv_rkey) { if (mr->mr_handle == rep->rr_inv_rkey) {
list_del_init(&mr->mr_list); list_del_init(&mr->mr_list);
trace_xprtrdma_mr_remoteinv(mr); trace_xprtrdma_mr_remoteinv(mr);
mr->frwr.fr_state = FRWR_IS_INVALID;
rpcrdma_mr_unmap_and_put(mr); rpcrdma_mr_unmap_and_put(mr);
break; /* only one invalidated MR per RPC */ break; /* only one invalidated MR per RPC */
} }
} }
static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
if (wc->status != IB_WC_SUCCESS)
rpcrdma_mr_recycle(mr);
else
rpcrdma_mr_unmap_and_put(mr);
}
/**
* frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
*/
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_frwr *frwr =
container_of(cqe, struct rpcrdma_frwr, fr_cqe);
struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li(wc, frwr);
__frwr_release_mr(wc, mr);
}
/**
* frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
* @cq: completion queue (ignored)
* @wc: completed WR
*
* Awaken anyone waiting for an MR to finish being fenced.
*/
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_frwr *frwr =
container_of(cqe, struct rpcrdma_frwr, fr_cqe);
struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li_wake(wc, frwr);
complete(&frwr->fr_linv_done);
__frwr_release_mr(wc, mr);
}
/** /**
* frwr_unmap_sync - invalidate memory regions that were registered for @req * frwr_unmap_sync - invalidate memory regions that were registered for @req
* @r_xprt: controlling transport * @r_xprt: controlling transport instance
* @mrs: list of MRs to process * @req: rpcrdma_req with a non-empty list of MRs to process
* *
* Sleeps until it is safe for the host CPU to access the * Sleeps until it is safe for the host CPU to access the previously mapped
* previously mapped memory regions. * memory regions. This guarantees that registered MRs are properly fenced
* * from the server before the RPC consumer accesses the data in them. It
* Caller ensures that @mrs is not empty before the call. This * also ensures proper Send flow control: waking the next RPC waits until
* function empties the list. * this RPC has relinquished all its Send Queue entries.
*/ */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs) void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{ {
struct ib_send_wr *first, **prev, *last; struct ib_send_wr *first, **prev, *last;
const struct ib_send_wr *bad_wr; const struct ib_send_wr *bad_wr;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct rpcrdma_frwr *frwr; struct rpcrdma_frwr *frwr;
struct rpcrdma_mr *mr; struct rpcrdma_mr *mr;
int count, rc; int rc;
/* ORDER: Invalidate all of the MRs first /* ORDER: Invalidate all of the MRs first
* *
@ -544,33 +561,32 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
* a single ib_post_send() call. * a single ib_post_send() call.
*/ */
frwr = NULL; frwr = NULL;
count = 0;
prev = &first; prev = &first;
list_for_each_entry(mr, mrs, mr_list) { while (!list_empty(&req->rl_registered)) {
mr->frwr.fr_state = FRWR_IS_INVALID; mr = rpcrdma_mr_pop(&req->rl_registered);
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
frwr = &mr->frwr; frwr = &mr->frwr;
trace_xprtrdma_mr_localinv(mr);
frwr->fr_cqe.done = frwr_wc_localinv; frwr->fr_cqe.done = frwr_wc_localinv;
last = &frwr->fr_invwr; last = &frwr->fr_invwr;
memset(last, 0, sizeof(*last)); last->next = NULL;
last->wr_cqe = &frwr->fr_cqe; last->wr_cqe = &frwr->fr_cqe;
last->sg_list = NULL;
last->num_sge = 0;
last->opcode = IB_WR_LOCAL_INV; last->opcode = IB_WR_LOCAL_INV;
last->send_flags = IB_SEND_SIGNALED;
last->ex.invalidate_rkey = mr->mr_handle; last->ex.invalidate_rkey = mr->mr_handle;
count++;
*prev = last; *prev = last;
prev = &last->next; prev = &last->next;
} }
if (!frwr)
goto unmap;
/* Strong send queue ordering guarantees that when the /* Strong send queue ordering guarantees that when the
* last WR in the chain completes, all WRs in the chain * last WR in the chain completes, all WRs in the chain
* are complete. * are complete.
*/ */
last->send_flags = IB_SEND_SIGNALED;
frwr->fr_cqe.done = frwr_wc_localinv_wake; frwr->fr_cqe.done = frwr_wc_localinv_wake;
reinit_completion(&frwr->fr_linv_done); reinit_completion(&frwr->fr_linv_done);
@ -578,29 +594,20 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
* replaces the QP. The RPC reply handler won't call us * replaces the QP. The RPC reply handler won't call us
* unless ri_id->qp is a valid pointer. * unless ri_id->qp is a valid pointer.
*/ */
r_xprt->rx_stats.local_inv_needed++;
bad_wr = NULL; bad_wr = NULL;
rc = ib_post_send(ia->ri_id->qp, first, &bad_wr); rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
trace_xprtrdma_post_send(req, rc);
/* The final LOCAL_INV WR in the chain is supposed to
* do the wake. If it was never posted, the wake will
* not happen, so don't wait in that case.
*/
if (bad_wr != first) if (bad_wr != first)
wait_for_completion(&frwr->fr_linv_done); wait_for_completion(&frwr->fr_linv_done);
if (rc) if (!rc)
goto out_release;
/* ORDER: Now DMA unmap all of the MRs, and return
* them to the free MR list.
*/
unmap:
while (!list_empty(mrs)) {
mr = rpcrdma_mr_pop(mrs);
rpcrdma_mr_unmap_and_put(mr);
}
return; return;
out_release: /* Recycle MRs in the LOCAL_INV chain that did not get posted.
pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);
/* Unmap and release the MRs in the LOCAL_INV WRs that did not
* get posted.
*/ */
while (bad_wr) { while (bad_wr) {
frwr = container_of(bad_wr, struct rpcrdma_frwr, frwr = container_of(bad_wr, struct rpcrdma_frwr,
@ -612,3 +619,101 @@ out_release:
rpcrdma_mr_recycle(mr); rpcrdma_mr_recycle(mr);
} }
} }
/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frwr *frwr =
		container_of(wc->wr_cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	/* Complete the RPC before releasing the MR: __frwr_release_mr()
	 * can recycle the rpcrdma_mr that frwr is embedded in.
	 */
	rpcrdma_complete_rqst(frwr->fr_req->rl_reply);
	__frwr_release_mr(wc, mr);
}
/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while (!list_empty(&req->rl_registered)) {
		/* Each popped MR contributes one LOCAL_INV WR to the chain */
		mr = rpcrdma_mr_pop(&req->rl_registered);
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		/* fr_req lets the final completion find the rqst to complete */
		frwr->fr_req = req;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	/* NOTE(review): per the kernel-doc, @req has a non-empty MR list,
	 * so frwr is non-NULL here; a caller violating that contract would
	 * dereference NULL on the next line.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	trace_xprtrdma_post_send(req, rc);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		/* Advance before recycling: rpcrdma_mr_recycle() may free
		 * the memory that bad_wr points into.
		 */
		bad_wr = bad_wr->next;
		rpcrdma_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}

View File

@ -366,6 +366,9 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
unsigned int pos; unsigned int pos;
int nsegs; int nsegs;
if (rtype == rpcrdma_noch)
goto done;
pos = rqst->rq_snd_buf.head[0].iov_len; pos = rqst->rq_snd_buf.head[0].iov_len;
if (rtype == rpcrdma_areadch) if (rtype == rpcrdma_areadch)
pos = 0; pos = 0;
@ -389,7 +392,8 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nsegs -= mr->mr_nents; nsegs -= mr->mr_nents;
} while (nsegs); } while (nsegs);
return 0; done:
return encode_item_not_present(xdr);
} }
/* Register and XDR encode the Write list. Supports encoding a list /* Register and XDR encode the Write list. Supports encoding a list
@ -417,6 +421,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
int nsegs, nchunks; int nsegs, nchunks;
__be32 *segcount; __be32 *segcount;
if (wtype != rpcrdma_writech)
goto done;
seg = req->rl_segments; seg = req->rl_segments;
nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
rqst->rq_rcv_buf.head[0].iov_len, rqst->rq_rcv_buf.head[0].iov_len,
@ -451,7 +458,8 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
/* Update count of segments in this Write chunk */ /* Update count of segments in this Write chunk */
*segcount = cpu_to_be32(nchunks); *segcount = cpu_to_be32(nchunks);
return 0; done:
return encode_item_not_present(xdr);
} }
/* Register and XDR encode the Reply chunk. Supports encoding an array /* Register and XDR encode the Reply chunk. Supports encoding an array
@ -476,6 +484,9 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
int nsegs, nchunks; int nsegs, nchunks;
__be32 *segcount; __be32 *segcount;
if (wtype != rpcrdma_replych)
return encode_item_not_present(xdr);
seg = req->rl_segments; seg = req->rl_segments;
nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
if (nsegs < 0) if (nsegs < 0)
@ -511,6 +522,16 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
return 0; return 0;
} }
static void rpcrdma_sendctx_done(struct kref *kref)
{
struct rpcrdma_req *req =
container_of(kref, struct rpcrdma_req, rl_kref);
struct rpcrdma_rep *rep = req->rl_reply;
rpcrdma_complete_rqst(rep);
rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}
/** /**
* rpcrdma_sendctx_unmap - DMA-unmap Send buffer * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
* @sc: sendctx containing SGEs to unmap * @sc: sendctx containing SGEs to unmap
@ -520,6 +541,9 @@ void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{ {
struct ib_sge *sge; struct ib_sge *sge;
if (!sc->sc_unmap_count)
return;
/* The first two SGEs contain the transport header and /* The first two SGEs contain the transport header and
* the inline buffer. These are always left mapped so * the inline buffer. These are always left mapped so
* they can be cheaply re-used. * they can be cheaply re-used.
@ -529,9 +553,7 @@ void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length, ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
&sc->sc_req->rl_flags))
wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
} }
/* Prepare an SGE for the RPC-over-RDMA transport header. /* Prepare an SGE for the RPC-over-RDMA transport header.
@ -666,7 +688,7 @@ map_tail:
out: out:
sc->sc_wr.num_sge += sge_no; sc->sc_wr.num_sge += sge_no;
if (sc->sc_unmap_count) if (sc->sc_unmap_count)
__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags); kref_get(&req->rl_kref);
return true; return true;
out_regbuf: out_regbuf:
@ -699,22 +721,28 @@ rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req, u32 hdrlen, struct rpcrdma_req *req, u32 hdrlen,
struct xdr_buf *xdr, enum rpcrdma_chunktype rtype) struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{ {
int ret;
ret = -EAGAIN;
req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt); req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
if (!req->rl_sendctx) if (!req->rl_sendctx)
return -EAGAIN; goto err;
req->rl_sendctx->sc_wr.num_sge = 0; req->rl_sendctx->sc_wr.num_sge = 0;
req->rl_sendctx->sc_unmap_count = 0; req->rl_sendctx->sc_unmap_count = 0;
req->rl_sendctx->sc_req = req; req->rl_sendctx->sc_req = req;
__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags); kref_init(&req->rl_kref);
ret = -EIO;
if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen)) if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
return -EIO; goto err;
if (rtype != rpcrdma_areadch) if (rtype != rpcrdma_areadch)
if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype)) if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
return -EIO; goto err;
return 0; return 0;
err:
trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
return ret;
} }
/** /**
@ -842,50 +870,28 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
* send a Call message with a Position Zero Read chunk and a * send a Call message with a Position Zero Read chunk and a
* regular Read chunk at the same time. * regular Read chunk at the same time.
*/ */
if (rtype != rpcrdma_noch) {
ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype); ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
if (ret) if (ret)
goto out_err; goto out_err;
}
ret = encode_item_not_present(xdr);
if (ret)
goto out_err;
if (wtype == rpcrdma_writech) {
ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype); ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
if (ret) if (ret)
goto out_err; goto out_err;
}
ret = encode_item_not_present(xdr);
if (ret)
goto out_err;
if (wtype != rpcrdma_replych)
ret = encode_item_not_present(xdr);
else
ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype); ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
if (ret) if (ret)
goto out_err; goto out_err;
trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype); ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
&rqst->rq_snd_buf, rtype); &rqst->rq_snd_buf, rtype);
if (ret) if (ret)
goto out_err; goto out_err;
trace_xprtrdma_marshal(req, rtype, wtype);
return 0; return 0;
out_err: out_err:
trace_xprtrdma_marshal_failed(rqst, ret); trace_xprtrdma_marshal_failed(rqst, ret);
switch (ret) {
case -EAGAIN:
xprt_wait_for_buffer_space(rqst->rq_xprt);
break;
case -ENOBUFS:
break;
default:
r_xprt->rx_stats.failed_marshal_count++; r_xprt->rx_stats.failed_marshal_count++;
} frwr_reset(req);
return ret; return ret;
} }
@ -1269,51 +1275,17 @@ out_badheader:
goto out; goto out;
} }
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) static void rpcrdma_reply_done(struct kref *kref)
{ {
/* Invalidate and unmap the data payloads before waking struct rpcrdma_req *req =
* the waiting application. This guarantees the memory container_of(kref, struct rpcrdma_req, rl_kref);
* regions are properly fenced from the server before the
* application accesses the data. It also ensures proper
* send flow control: waking the next RPC waits until this
* RPC has relinquished all its Send Queue entries.
*/
if (!list_empty(&req->rl_registered))
frwr_unmap_sync(r_xprt, &req->rl_registered);
/* Ensure that any DMA mapped pages associated with rpcrdma_complete_rqst(req->rl_reply);
* the Send of the RPC Call have been unmapped before
* allowing the RPC to complete. This protects argument
* memory not controlled by the RPC client from being
* re-used before we're done with it.
*/
if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
r_xprt->rx_stats.reply_waits_for_send++;
out_of_line_wait_on_bit(&req->rl_flags,
RPCRDMA_REQ_F_TX_RESOURCES,
bit_wait,
TASK_UNINTERRUPTIBLE);
}
} }
/* Reply handling runs in the poll worker thread. Anything that /**
* might wait is deferred to a separate workqueue. * rpcrdma_reply_handler - Process received RPC/RDMA messages
*/ * @rep: Incoming rpcrdma_rep object to process
void rpcrdma_deferred_completion(struct work_struct *work)
{
struct rpcrdma_rep *rep =
container_of(work, struct rpcrdma_rep, rr_work);
struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
trace_xprtrdma_defer_cmp(rep);
if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
frwr_reminv(rep, &req->rl_registered);
rpcrdma_release_rqst(r_xprt, req);
rpcrdma_complete_rqst(rep);
}
/* Process received RPC/RDMA messages.
* *
* Errors must result in the RPC task either being awakened, or * Errors must result in the RPC task either being awakened, or
* allowed to timeout, to discover the errors at that time. * allowed to timeout, to discover the errors at that time.
@ -1373,10 +1345,16 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
} }
req->rl_reply = rep; req->rl_reply = rep;
rep->rr_rqst = rqst; rep->rr_rqst = rqst;
clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
trace_xprtrdma_reply(rqst->rq_task, rep, req, credits); trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
queue_work(buf->rb_completion_wq, &rep->rr_work);
if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
frwr_reminv(rep, &req->rl_registered);
if (!list_empty(&req->rl_registered))
frwr_unmap_async(r_xprt, req);
/* LocalInv completion will complete the RPC */
else
kref_put(&req->rl_kref, rpcrdma_reply_done);
return; return;
out_badversion: out_badversion:

View File

@ -298,6 +298,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
module_put(THIS_MODULE); module_put(THIS_MODULE);
} }
/* 60 second timeout, no retries */
static const struct rpc_timeout xprt_rdma_default_timeout = { static const struct rpc_timeout xprt_rdma_default_timeout = {
.to_initval = 60 * HZ, .to_initval = 60 * HZ,
.to_maxval = 60 * HZ, .to_maxval = 60 * HZ,
@ -323,8 +324,9 @@ xprt_setup_rdma(struct xprt_create *args)
if (!xprt) if (!xprt)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* 60 second timeout, no retries */
xprt->timeout = &xprt_rdma_default_timeout; xprt->timeout = &xprt_rdma_default_timeout;
xprt->connect_timeout = xprt->timeout->to_initval;
xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
xprt->bind_timeout = RPCRDMA_BIND_TO; xprt->bind_timeout = RPCRDMA_BIND_TO;
xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO; xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
@ -487,31 +489,64 @@ xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
} }
/** /**
* xprt_rdma_connect - try to establish a transport connection * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection
* @xprt: controlling transport instance
* @connect_timeout: reconnect timeout after client disconnects
* @reconnect_timeout: reconnect timeout after server disconnects
*
*/
static void xprt_rdma_tcp_set_connect_timeout(struct rpc_xprt *xprt,
unsigned long connect_timeout,
unsigned long reconnect_timeout)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_op_set_cto(r_xprt, connect_timeout, reconnect_timeout);
spin_lock(&xprt->transport_lock);
if (connect_timeout < xprt->connect_timeout) {
struct rpc_timeout to;
unsigned long initval;
to = *xprt->timeout;
initval = connect_timeout;
if (initval < RPCRDMA_INIT_REEST_TO << 1)
initval = RPCRDMA_INIT_REEST_TO << 1;
to.to_initval = initval;
to.to_maxval = initval;
r_xprt->rx_timeout = to;
xprt->timeout = &r_xprt->rx_timeout;
xprt->connect_timeout = connect_timeout;
}
if (reconnect_timeout < xprt->max_reconnect_timeout)
xprt->max_reconnect_timeout = reconnect_timeout;
spin_unlock(&xprt->transport_lock);
}
/**
* xprt_rdma_connect - schedule an attempt to reconnect
* @xprt: transport state * @xprt: transport state
* @task: RPC scheduler context * @task: RPC scheduler context (unused)
* *
*/ */
static void static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{ {
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
unsigned long delay;
trace_xprtrdma_op_connect(r_xprt); trace_xprtrdma_op_connect(r_xprt);
delay = 0;
if (r_xprt->rx_ep.rep_connected != 0) { if (r_xprt->rx_ep.rep_connected != 0) {
/* Reconnect */ delay = xprt_reconnect_delay(xprt);
schedule_delayed_work(&r_xprt->rx_connect_worker, xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
xprt->reestablish_timeout);
xprt->reestablish_timeout <<= 1;
if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
} else {
schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
if (!RPC_IS_ASYNC(task))
flush_delayed_work(&r_xprt->rx_connect_worker);
} }
queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
delay);
} }
/** /**
@ -550,8 +585,11 @@ out_sleep:
static void static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst) xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{ {
struct rpcrdma_xprt *r_xprt =
container_of(xprt, struct rpcrdma_xprt, rx_xprt);
memset(rqst, 0, sizeof(*rqst)); memset(rqst, 0, sizeof(*rqst));
rpcrdma_buffer_put(rpcr_to_rdmar(rqst)); rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
rpc_wake_up_next(&xprt->backlog); rpc_wake_up_next(&xprt->backlog);
} }
@ -618,9 +656,16 @@ xprt_rdma_free(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst); struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
rpcrdma_release_rqst(r_xprt, req);
trace_xprtrdma_op_free(task, req); trace_xprtrdma_op_free(task, req);
if (!list_empty(&req->rl_registered))
frwr_unmap_sync(r_xprt, req);
/* XXX: If the RPC is completing because of a signal and
* not because a reply was received, we ought to ensure
* that the Send completion has fired, so that memory
* involved with the Send is not still visible to the NIC.
*/
} }
/** /**
@ -667,7 +712,6 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
goto drop_connection; goto drop_connection;
rqst->rq_xtime = ktime_get(); rqst->rq_xtime = ktime_get();
__set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection; goto drop_connection;
@ -760,6 +804,7 @@ static const struct rpc_xprt_ops xprt_rdma_procs = {
.send_request = xprt_rdma_send_request, .send_request = xprt_rdma_send_request,
.close = xprt_rdma_close, .close = xprt_rdma_close,
.destroy = xprt_rdma_destroy, .destroy = xprt_rdma_destroy,
.set_connect_timeout = xprt_rdma_tcp_set_connect_timeout,
.print_stats = xprt_rdma_print_stats, .print_stats = xprt_rdma_print_stats,
.enable_swap = xprt_rdma_enable_swap, .enable_swap = xprt_rdma_enable_swap,
.disable_swap = xprt_rdma_disable_swap, .disable_swap = xprt_rdma_disable_swap,

View File

@ -89,14 +89,12 @@ static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
*/ */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt) static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{ {
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ia *ia = &r_xprt->rx_ia; struct rpcrdma_ia *ia = &r_xprt->rx_ia;
/* Flush Receives, then wait for deferred Reply work /* Flush Receives, then wait for deferred Reply work
* to complete. * to complete.
*/ */
ib_drain_rq(ia->ri_id->qp); ib_drain_rq(ia->ri_id->qp);
drain_workqueue(buf->rb_completion_wq);
/* Deferred Reply processing might have scheduled /* Deferred Reply processing might have scheduled
* local invalidations. * local invalidations.
@ -901,7 +899,7 @@ out_emptyq:
* completions recently. This is a sign the Send Queue is * completions recently. This is a sign the Send Queue is
* backing up. Cause the caller to pause and try again. * backing up. Cause the caller to pause and try again.
*/ */
set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags); xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
r_xprt->rx_stats.empty_sendctx_q++; r_xprt->rx_stats.empty_sendctx_q++;
return NULL; return NULL;
} }
@ -936,11 +934,8 @@ rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
/* Paired with READ_ONCE */ /* Paired with READ_ONCE */
smp_store_release(&buf->rb_sc_tail, next_tail); smp_store_release(&buf->rb_sc_tail, next_tail);
if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
smp_mb__after_atomic();
xprt_write_space(&sc->sc_xprt->rx_xprt); xprt_write_space(&sc->sc_xprt->rx_xprt);
} }
}
static void static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
@ -977,8 +972,6 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
r_xprt->rx_stats.mrs_allocated += count; r_xprt->rx_stats.mrs_allocated += count;
spin_unlock(&buf->rb_mrlock); spin_unlock(&buf->rb_mrlock);
trace_xprtrdma_createmrs(r_xprt, count); trace_xprtrdma_createmrs(r_xprt, count);
xprt_write_space(&r_xprt->rx_xprt);
} }
static void static void
@ -990,6 +983,7 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
rx_buf); rx_buf);
rpcrdma_mrs_create(r_xprt); rpcrdma_mrs_create(r_xprt);
xprt_write_space(&r_xprt->rx_xprt);
} }
/** /**
@ -1025,7 +1019,6 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
if (!req->rl_recvbuf) if (!req->rl_recvbuf)
goto out4; goto out4;
req->rl_buffer = buffer;
INIT_LIST_HEAD(&req->rl_registered); INIT_LIST_HEAD(&req->rl_registered);
spin_lock(&buffer->rb_lock); spin_lock(&buffer->rb_lock);
list_add(&req->rl_all, &buffer->rb_allreqs); list_add(&req->rl_all, &buffer->rb_allreqs);
@ -1042,9 +1035,9 @@ out1:
return NULL; return NULL;
} }
static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp) static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
bool temp)
{ {
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_rep *rep; struct rpcrdma_rep *rep;
rep = kzalloc(sizeof(*rep), GFP_KERNEL); rep = kzalloc(sizeof(*rep), GFP_KERNEL);
@ -1055,27 +1048,22 @@ static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
DMA_FROM_DEVICE, GFP_KERNEL); DMA_FROM_DEVICE, GFP_KERNEL);
if (!rep->rr_rdmabuf) if (!rep->rr_rdmabuf)
goto out_free; goto out_free;
xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
rdmab_length(rep->rr_rdmabuf)); rdmab_length(rep->rr_rdmabuf));
rep->rr_cqe.done = rpcrdma_wc_receive; rep->rr_cqe.done = rpcrdma_wc_receive;
rep->rr_rxprt = r_xprt; rep->rr_rxprt = r_xprt;
INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
rep->rr_recv_wr.next = NULL; rep->rr_recv_wr.next = NULL;
rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
rep->rr_recv_wr.num_sge = 1; rep->rr_recv_wr.num_sge = 1;
rep->rr_temp = temp; rep->rr_temp = temp;
return rep;
spin_lock(&buf->rb_lock);
list_add(&rep->rr_list, &buf->rb_recv_bufs);
spin_unlock(&buf->rb_lock);
return true;
out_free: out_free:
kfree(rep); kfree(rep);
out: out:
return false; return NULL;
} }
/** /**
@ -1089,7 +1077,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
int i, rc; int i, rc;
buf->rb_flags = 0;
buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests; buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
buf->rb_bc_srv_max_requests = 0; buf->rb_bc_srv_max_requests = 0;
spin_lock_init(&buf->rb_mrlock); spin_lock_init(&buf->rb_mrlock);
@ -1122,15 +1109,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
if (rc) if (rc)
goto out; goto out;
buf->rb_completion_wq = alloc_workqueue("rpcrdma-%s",
WQ_MEM_RECLAIM | WQ_HIGHPRI,
0,
r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
if (!buf->rb_completion_wq) {
rc = -ENOMEM;
goto out;
}
return 0; return 0;
out: out:
rpcrdma_buffer_destroy(buf); rpcrdma_buffer_destroy(buf);
@ -1204,11 +1182,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{ {
cancel_delayed_work_sync(&buf->rb_refresh_worker); cancel_delayed_work_sync(&buf->rb_refresh_worker);
if (buf->rb_completion_wq) {
destroy_workqueue(buf->rb_completion_wq);
buf->rb_completion_wq = NULL;
}
rpcrdma_sendctxs_destroy(buf); rpcrdma_sendctxs_destroy(buf);
while (!list_empty(&buf->rb_recv_bufs)) { while (!list_empty(&buf->rb_recv_bufs)) {
@ -1325,13 +1298,12 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
/** /**
* rpcrdma_buffer_put - Put request/reply buffers back into pool * rpcrdma_buffer_put - Put request/reply buffers back into pool
* @buffers: buffer pool
* @req: object to return * @req: object to return
* *
*/ */
void void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
rpcrdma_buffer_put(struct rpcrdma_req *req)
{ {
struct rpcrdma_buffer *buffers = req->rl_buffer;
struct rpcrdma_rep *rep = req->rl_reply; struct rpcrdma_rep *rep = req->rl_reply;
req->rl_reply = NULL; req->rl_reply = NULL;
@ -1484,8 +1456,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr; struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
int rc; int rc;
if (!ep->rep_send_count || if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
send_wr->send_flags |= IB_SEND_SIGNALED; send_wr->send_flags |= IB_SEND_SIGNALED;
ep->rep_send_count = ep->rep_send_batch; ep->rep_send_count = ep->rep_send_batch;
} else { } else {
@ -1505,11 +1476,13 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{ {
struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ep *ep = &r_xprt->rx_ep; struct rpcrdma_ep *ep = &r_xprt->rx_ep;
struct ib_recv_wr *wr, *bad_wr; struct ib_recv_wr *i, *wr, *bad_wr;
struct rpcrdma_rep *rep;
int needed, count, rc; int needed, count, rc;
rc = 0; rc = 0;
count = 0; count = 0;
needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1); needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
if (ep->rep_receive_count > needed) if (ep->rep_receive_count > needed)
goto out; goto out;
@ -1517,51 +1490,65 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
if (!temp) if (!temp)
needed += RPCRDMA_MAX_RECV_BATCH; needed += RPCRDMA_MAX_RECV_BATCH;
count = 0; /* fast path: all needed reps can be found on the free list */
wr = NULL; wr = NULL;
while (needed) {
struct rpcrdma_regbuf *rb;
struct rpcrdma_rep *rep;
spin_lock(&buf->rb_lock); spin_lock(&buf->rb_lock);
while (needed) {
rep = list_first_entry_or_null(&buf->rb_recv_bufs, rep = list_first_entry_or_null(&buf->rb_recv_bufs,
struct rpcrdma_rep, rr_list); struct rpcrdma_rep, rr_list);
if (likely(rep)) if (!rep)
break;
list_del(&rep->rr_list); list_del(&rep->rr_list);
spin_unlock(&buf->rb_lock);
if (!rep) {
if (!rpcrdma_rep_create(r_xprt, temp))
break;
continue;
}
rb = rep->rr_rdmabuf;
if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) {
rpcrdma_recv_buffer_put(rep);
break;
}
trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
rep->rr_recv_wr.next = wr; rep->rr_recv_wr.next = wr;
wr = &rep->rr_recv_wr; wr = &rep->rr_recv_wr;
++count;
--needed; --needed;
} }
if (!count) spin_unlock(&buf->rb_lock);
while (needed) {
rep = rpcrdma_rep_create(r_xprt, temp);
if (!rep)
break;
rep->rr_recv_wr.next = wr;
wr = &rep->rr_recv_wr;
--needed;
}
if (!wr)
goto out; goto out;
for (i = wr; i; i = i->next) {
rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
goto release_wrs;
trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
++count;
}
rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr, rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
(const struct ib_recv_wr **)&bad_wr); (const struct ib_recv_wr **)&bad_wr);
out:
trace_xprtrdma_post_recvs(r_xprt, count, rc);
if (rc) { if (rc) {
for (wr = bad_wr; wr; wr = wr->next) { for (wr = bad_wr; wr;) {
struct rpcrdma_rep *rep; struct rpcrdma_rep *rep;
rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr); rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
wr = wr->next;
rpcrdma_recv_buffer_put(rep); rpcrdma_recv_buffer_put(rep);
--count; --count;
} }
} }
ep->rep_receive_count += count; ep->rep_receive_count += count;
out: return;
trace_xprtrdma_post_recvs(r_xprt, count, rc);
release_wrs:
for (i = wr; i;) {
rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
i = i->next;
rpcrdma_recv_buffer_put(rep);
}
} }

View File

@ -45,6 +45,7 @@
#include <linux/wait.h> /* wait_queue_head_t, etc */ #include <linux/wait.h> /* wait_queue_head_t, etc */
#include <linux/spinlock.h> /* spinlock_t, etc */ #include <linux/spinlock.h> /* spinlock_t, etc */
#include <linux/atomic.h> /* atomic_t, etc */ #include <linux/atomic.h> /* atomic_t, etc */
#include <linux/kref.h> /* struct kref */
#include <linux/workqueue.h> /* struct work_struct */ #include <linux/workqueue.h> /* struct work_struct */
#include <rdma/rdma_cm.h> /* RDMA connection api */ #include <rdma/rdma_cm.h> /* RDMA connection api */
@ -202,10 +203,9 @@ struct rpcrdma_rep {
bool rr_temp; bool rr_temp;
struct rpcrdma_regbuf *rr_rdmabuf; struct rpcrdma_regbuf *rr_rdmabuf;
struct rpcrdma_xprt *rr_rxprt; struct rpcrdma_xprt *rr_rxprt;
struct work_struct rr_work; struct rpc_rqst *rr_rqst;
struct xdr_buf rr_hdrbuf; struct xdr_buf rr_hdrbuf;
struct xdr_stream rr_stream; struct xdr_stream rr_stream;
struct rpc_rqst *rr_rqst;
struct list_head rr_list; struct list_head rr_list;
struct ib_recv_wr rr_recv_wr; struct ib_recv_wr rr_recv_wr;
}; };
@ -240,18 +240,12 @@ struct rpcrdma_sendctx {
* An external memory region is any buffer or page that is registered * An external memory region is any buffer or page that is registered
* on the fly (ie, not pre-registered). * on the fly (ie, not pre-registered).
*/ */
enum rpcrdma_frwr_state { struct rpcrdma_req;
FRWR_IS_INVALID, /* ready to be used */
FRWR_IS_VALID, /* in use */
FRWR_FLUSHED_FR, /* flushed FASTREG WR */
FRWR_FLUSHED_LI, /* flushed LOCALINV WR */
};
struct rpcrdma_frwr { struct rpcrdma_frwr {
struct ib_mr *fr_mr; struct ib_mr *fr_mr;
struct ib_cqe fr_cqe; struct ib_cqe fr_cqe;
enum rpcrdma_frwr_state fr_state;
struct completion fr_linv_done; struct completion fr_linv_done;
struct rpcrdma_req *fr_req;
union { union {
struct ib_reg_wr fr_regwr; struct ib_reg_wr fr_regwr;
struct ib_send_wr fr_invwr; struct ib_send_wr fr_invwr;
@ -326,7 +320,6 @@ struct rpcrdma_buffer;
struct rpcrdma_req { struct rpcrdma_req {
struct list_head rl_list; struct list_head rl_list;
struct rpc_rqst rl_slot; struct rpc_rqst rl_slot;
struct rpcrdma_buffer *rl_buffer;
struct rpcrdma_rep *rl_reply; struct rpcrdma_rep *rl_reply;
struct xdr_stream rl_stream; struct xdr_stream rl_stream;
struct xdr_buf rl_hdrbuf; struct xdr_buf rl_hdrbuf;
@ -336,18 +329,12 @@ struct rpcrdma_req {
struct rpcrdma_regbuf *rl_recvbuf; /* rq_rcv_buf */ struct rpcrdma_regbuf *rl_recvbuf; /* rq_rcv_buf */
struct list_head rl_all; struct list_head rl_all;
unsigned long rl_flags; struct kref rl_kref;
struct list_head rl_registered; /* registered segments */ struct list_head rl_registered; /* registered segments */
struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
}; };
/* rl_flags */
enum {
RPCRDMA_REQ_F_PENDING = 0,
RPCRDMA_REQ_F_TX_RESOURCES,
};
static inline struct rpcrdma_req * static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst) rpcr_to_rdmar(const struct rpc_rqst *rqst)
{ {
@ -391,22 +378,15 @@ struct rpcrdma_buffer {
struct list_head rb_recv_bufs; struct list_head rb_recv_bufs;
struct list_head rb_allreqs; struct list_head rb_allreqs;
unsigned long rb_flags;
u32 rb_max_requests; u32 rb_max_requests;
u32 rb_credits; /* most recent credit grant */ u32 rb_credits; /* most recent credit grant */
u32 rb_bc_srv_max_requests; u32 rb_bc_srv_max_requests;
u32 rb_bc_max_requests; u32 rb_bc_max_requests;
struct workqueue_struct *rb_completion_wq;
struct delayed_work rb_refresh_worker; struct delayed_work rb_refresh_worker;
}; };
/* rb_flags */
enum {
RPCRDMA_BUF_F_EMPTY_SCQ = 0,
};
/* /*
* Statistics for RPCRDMA * Statistics for RPCRDMA
*/ */
@ -452,6 +432,7 @@ struct rpcrdma_xprt {
struct rpcrdma_ep rx_ep; struct rpcrdma_ep rx_ep;
struct rpcrdma_buffer rx_buf; struct rpcrdma_buffer rx_buf;
struct delayed_work rx_connect_worker; struct delayed_work rx_connect_worker;
struct rpc_timeout rx_timeout;
struct rpcrdma_stats rx_stats; struct rpcrdma_stats rx_stats;
}; };
@ -518,7 +499,8 @@ rpcrdma_mr_recycle(struct rpcrdma_mr *mr)
} }
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *); void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
struct rpcrdma_req *req);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
@ -564,6 +546,7 @@ rpcrdma_data_dir(bool writing)
/* Memory registration calls xprtrdma/frwr_ops.c /* Memory registration calls xprtrdma/frwr_ops.c
*/ */
bool frwr_is_supported(struct ib_device *device); bool frwr_is_supported(struct ib_device *device);
void frwr_reset(struct rpcrdma_req *req);
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep); int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr); int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
void frwr_release_mr(struct rpcrdma_mr *mr); void frwr_release_mr(struct rpcrdma_mr *mr);
@ -574,8 +557,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr **mr); struct rpcrdma_mr **mr);
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req); int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs); void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
struct list_head *mrs); void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
/* /*
* RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
@ -598,9 +581,6 @@ int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *); void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep); void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep); void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req);
void rpcrdma_deferred_completion(struct work_struct *work);
static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len) static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{ {

View File

@ -2414,25 +2414,6 @@ out:
xprt_wake_pending_tasks(xprt, status); xprt_wake_pending_tasks(xprt, status);
} }
static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
{
unsigned long start, now = jiffies;
start = xprt->stat.connect_start + xprt->reestablish_timeout;
if (time_after(start, now))
return start - now;
return 0;
}
static void xs_reconnect_backoff(struct rpc_xprt *xprt)
{
xprt->reestablish_timeout <<= 1;
if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
xprt->reestablish_timeout = xprt->max_reconnect_timeout;
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
}
/** /**
* xs_connect - connect a socket to a remote endpoint * xs_connect - connect a socket to a remote endpoint
* @xprt: pointer to transport structure * @xprt: pointer to transport structure
@ -2462,8 +2443,8 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
/* Start by resetting any existing state */ /* Start by resetting any existing state */
xs_reset_transport(transport); xs_reset_transport(transport);
delay = xs_reconnect_delay(xprt); delay = xprt_reconnect_delay(xprt);
xs_reconnect_backoff(xprt); xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
} else } else
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);