RDS: IB: split the mr registration and invalidation path
MR invalidation in RDS is done in a background thread and not in the data path like registration. So break the dependency between them, which helps to remove the performance bottleneck.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
commit 5601245931
parent 584a8279a4
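For orientation before the hunks below, here is a minimal userspace sketch of the accounting scheme this patch moves to. The counter names mirror the patch (i_fastreg_wrs, i_fastunreg_wrs); the surrounding struct, helper names, and C11 atomics are illustrative stand-ins, not RDS code, and the spin loop is a simplified version of the kernel's atomic_dec_return() loop (which tests the post-decrement value).

/* Userspace model of the split WR budgets introduced by this patch. */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define RDS_IB_DEFAULT_FR_WR     256   /* registration budget (data path) */
#define RDS_IB_DEFAULT_FR_INV_WR 256   /* invalidation budget (background) */

struct conn {
	atomic_int i_fastreg_wrs;    /* consumed by MR registration posts */
	atomic_int i_fastunreg_wrs;  /* consumed by MR invalidation posts */
};

/* Take one slot from a budget, spinning until one is available. */
static void take_wr_slot(atomic_int *budget)
{
	while (atomic_fetch_sub(budget, 1) <= 0) {
		atomic_fetch_add(budget, 1); /* undo and retry */
		sched_yield();               /* stands in for cpu_relax() */
	}
}

/* Completion handler returns the slot to the budget it came from. */
static void mr_cqe_done(struct conn *ic, int was_invalidate)
{
	if (was_invalidate)
		atomic_fetch_add(&ic->i_fastunreg_wrs, 1);
	else
		atomic_fetch_add(&ic->i_fastreg_wrs, 1);
}

int main(void)
{
	struct conn ic = {
		.i_fastreg_wrs   = RDS_IB_DEFAULT_FR_WR,
		.i_fastunreg_wrs = RDS_IB_DEFAULT_FR_INV_WR,
	};

	/* Data path: a registration WR draws from its own budget. */
	take_wr_slot(&ic.i_fastreg_wrs);
	mr_cqe_done(&ic, 0);

	/* Background thread: an invalidation WR draws from the other
	 * budget, so it no longer competes with registrations. */
	take_wr_slot(&ic.i_fastunreg_wrs);
	mr_cqe_done(&ic, 1);

	printf("reg budget %d, inv budget %d\n",
	       atomic_load(&ic.i_fastreg_wrs),
	       atomic_load(&ic.i_fastunreg_wrs));
	return 0;
}

The point of the patch is visible in the two take_wr_slot() call sites: registration (data path) and invalidation (background) each draw from their own budget, so a burst of invalidations can no longer starve registrations.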
@@ -14,7 +14,8 @@
 #define RDS_IB_DEFAULT_RECV_WR		1024
 #define RDS_IB_DEFAULT_SEND_WR		256
-#define RDS_IB_DEFAULT_FR_WR		512
+#define RDS_IB_DEFAULT_FR_WR		256
+#define RDS_IB_DEFAULT_FR_INV_WR	256
 
 #define RDS_IB_DEFAULT_RETRY_COUNT	1
@@ -125,6 +126,7 @@ struct rds_ib_connection {
 
 	/* To control the number of wrs from fastreg */
 	atomic_t		i_fastreg_wrs;
+	atomic_t		i_fastunreg_wrs;
 
 	/* interrupt handling */
 	struct tasklet_struct	i_send_tasklet;
@@ -382,7 +382,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	 * completion queue and send queue. This extra space is used for FRMR
 	 * registration and invalidation work requests
 	 */
-	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
+	fr_queue_space = rds_ibdev->use_fastreg ?
+			 (RDS_IB_DEFAULT_FR_WR + 1) +
+			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
+			 : 0;
 
 	/* add the conn now so that connection establishment has the dev */
 	rds_ib_add_conn(rds_ibdev, conn);
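A quick check with the defaults defined above: when fastreg is in use, fr_queue_space now reserves (256 + 1) + (256 + 1) = 514 extra queue entries, split between the registration and invalidation paths, in place of the single 512-entry budget both paths shared before this change.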
@@ -444,6 +447,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	attr.send_cq = ic->i_send_cq;
 	attr.recv_cq = ic->i_recv_cq;
 	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
+	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);
 
 	/*
 	 * XXX this can fail if max_*_wr is too large? Are we supposed
@@ -766,7 +770,8 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
 		wait_event(rds_ib_ring_empty_wait,
 			   rds_ib_ring_empty(&ic->i_recv_ring) &&
 			   (atomic_read(&ic->i_signaled_sends) == 0) &&
-			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
+			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
+			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
 		tasklet_kill(&ic->i_send_tasklet);
 		tasklet_kill(&ic->i_recv_tasklet);
@@ -241,8 +241,8 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 	if (frmr->fr_state != FRMR_IS_INUSE)
 		goto out;
 
-	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
-		atomic_inc(&ibmr->ic->i_fastreg_wrs);
+	while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
+		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
 		cpu_relax();
 	}
@@ -261,7 +261,7 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 	if (unlikely(ret)) {
 		frmr->fr_state = FRMR_IS_STALE;
 		frmr->fr_inv = false;
-		atomic_inc(&ibmr->ic->i_fastreg_wrs);
+		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
 		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
 		goto out;
 	}
@@ -289,9 +289,10 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 	if (frmr->fr_inv) {
 		frmr->fr_state = FRMR_IS_FREE;
 		frmr->fr_inv = false;
+		atomic_inc(&ic->i_fastreg_wrs);
+	} else {
+		atomic_inc(&ic->i_fastunreg_wrs);
 	}
 
-	atomic_inc(&ic->i_fastreg_wrs);
-
 }
 
 void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,