RDMA/hns: Support QP's restrack ops for hns driver

The QP restrack attributes come from the queue information maintained by
the driver.

For example:

$ rdma res show qp link hns_0 lqpn 41 -jp -dd
[ {
        "ifindex": 4,
        "ifname": "hns_0",
        "port": 1,
        "lqpn": 41,
        "rqpn": 40,
        "type": "RC",
        "state": "RTR",
        "rq-psn": 12474738,
        "sq-psn": 0,
        "path-mig-state": "ARMED",
        "pdn": 9,
        "pid": 1523,
        "comm": "ib_send_bw"
    },
    "drv_sq_wqe_cnt": 128,
    "drv_sq_max_gs": 1,
    "drv_rq_wqe_cnt": 512,
    "drv_rq_max_gs": 2,
    "drv_ext_sge_sge_cnt": 0
}

Link: https://lore.kernel.org/r/20220822104455.2311053-5-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>

@@ -1225,6 +1225,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,

@@ -568,6 +568,7 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
static const struct ib_device_ops hns_roce_dev_restrack_ops = {
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
	.fill_res_qp_entry = hns_roce_fill_res_qp_entry,
};

static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
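
For context, a minimal sketch (not part of this diff) of how an ops table like hns_roce_dev_restrack_ops is merged into the ib_device; the wrapper name hns_roce_set_restrack_ops is hypothetical, while ib_set_device_ops() is the core helper that hns_roce_register_device() uses for its other ops tables:

/*
 * Illustrative sketch only -- the wrapper name is hypothetical.
 * ib_set_device_ops() copies every non-NULL callback from the table
 * into ib_dev->ops, so the RDMA core can later invoke
 * ops.fill_res_qp_entry() while dumping QP resources over RDMA netlink.
 */
#include <rdma/ib_verbs.h>	/* struct ib_device, ib_set_device_ops() */

static void hns_roce_set_restrack_ops(struct ib_device *ib_dev)
{
	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
}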

@@ -78,3 +78,37 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
	return ret;
}

int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}
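
As a rough, table-driven variant of the same nest/put/cancel pattern (a hypothetical sketch, not part of this patch; struct hns_dbg_attr and hns_fill_driver_attrs are invented names), the per-attribute if/goto chain above could also be written as a loop:

/*
 * Hypothetical sketch only -- not in the driver.  It re-expresses the
 * RDMA_NLDEV_ATTR_DRIVER nesting above as a table-driven loop: start
 * the nest, emit each name/value pair, and cancel the nest if the skb
 * runs out of room so no partial driver table reaches userspace.
 */
#include <net/netlink.h>	/* nla_nest_start/_end/_cancel */
#include <rdma/restrack.h>	/* rdma_nl_put_driver_u32_hex */
#include <rdma/rdma_netlink.h>	/* RDMA_NLDEV_ATTR_DRIVER */

struct hns_dbg_attr {		/* invented name, for illustration */
	const char *name;
	u32 val;
};

static int hns_fill_driver_attrs(struct sk_buff *msg,
				 const struct hns_dbg_attr *attrs,
				 unsigned int cnt)
{
	struct nlattr *table_attr;
	unsigned int i;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < cnt; i++)
		if (rdma_nl_put_driver_u32_hex(msg, attrs[i].name,
					       attrs[i].val))
			goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

A caller would fill the table from hr_qp->sq, hr_qp->rq and hr_qp->sge, exactly as the patch does inline.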