RDMA: Add dedicated MR resource tracker function
In order to avoid double multiplexing of the resource when it is an MR, add a dedicated callback function.

Link: https://lore.kernel.org/r/20200623113043.1228482-5-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit f443452900
parent 24fd6d6f85
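For context, the dispatch change is easy to see in isolation. Below is a minimal, self-contained C sketch of the pattern this patch removes and the one it adds: the old generic fill_res_entry() callback receives a generic restrack entry and must demultiplex on its type a second time before recovering the MR, while the new dedicated fill_res_mr_entry() is handed the MR directly. All types and names here are simplified stand-ins, not the kernel's.

/*
 * Standalone sketch (simplified stand-ins, not kernel code): the old
 * generic callback must demultiplex on the entry type and cast back to
 * the containing MR; the dedicated callback receives the MR directly.
 */
#include <stdio.h>
#include <stddef.h>

enum restrack_type { RESTRACK_MR, RESTRACK_QP };

struct restrack_entry {
	enum restrack_type type;
};

struct mr {
	struct restrack_entry res;	/* tracker entry embedded in the MR */
	unsigned int lkey;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Old style: one callback for every resource type. */
static int old_fill_res_entry(struct restrack_entry *res)
{
	struct mr *mr;

	if (res->type != RESTRACK_MR)	/* second multiplexing step */
		return 0;
	mr = container_of(res, struct mr, res);
	return printf("old path: mr lkey=%u\n", mr->lkey);
}

/* New style: dedicated MR callback; the core resolved the type already. */
static int new_fill_res_mr_entry(struct mr *mr)
{
	return printf("new path: mr lkey=%u\n", mr->lkey);
}

int main(void)
{
	struct mr mr = { .res = { .type = RESTRACK_MR }, .lkey = 42 };

	old_fill_res_entry(&mr.res);	/* dispatch on type, then cast */
	new_fill_res_mr_entry(&mr);	/* direct, type-safe call */
	return 0;
}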
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2618,7 +2618,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, drain_sq);
 	SET_DEVICE_OP(dev_ops, enable_driver);
 	SET_DEVICE_OP(dev_ops, fill_res_entry);
-	SET_DEVICE_OP(dev_ops, fill_stat_entry);
+	SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
+	SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
 	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
 	SET_DEVICE_OP(dev_ops, get_dma_mr);
 	SET_DEVICE_OP(dev_ops, get_hw_stats);
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -454,14 +454,6 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
 	return dev->ops.fill_res_entry(msg, res);
 }
 
-static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg,
-			    struct rdma_restrack_entry *res)
-{
-	if (!dev->ops.fill_stat_entry)
-		return false;
-	return dev->ops.fill_stat_entry(msg, res);
-}
-
 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
 			     struct rdma_restrack_entry *res, uint32_t port)
 {
@@ -641,9 +633,8 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (fill_res_name_pid(msg, res))
 		goto err;
 
-	if (fill_res_entry(dev, msg, res))
-		goto err;
-
+	if (dev->ops.fill_res_mr_entry)
+		return dev->ops.fill_res_mr_entry(msg, mr);
 	return 0;
 
 err:	return -EMSGSIZE;
@@ -786,9 +777,8 @@ static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
 		goto err;
 
-	if (fill_stat_entry(dev, msg, res))
-		goto err;
-
+	if (dev->ops.fill_stat_mr_entry)
+		return dev->ops.fill_stat_mr_entry(msg, mr);
 	return 0;
 
 err:
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -1055,6 +1055,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
 
 typedef int c4iw_restrack_func(struct sk_buff *msg,
 			       struct rdma_restrack_entry *res);
+int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
 extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
 
 #endif
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -486,6 +486,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.destroy_qp = c4iw_destroy_qp,
 	.destroy_srq = c4iw_destroy_srq,
 	.fill_res_entry = fill_res_entry,
+	.fill_res_mr_entry = c4iw_fill_res_mr_entry,
 	.get_dev_fw_str = get_dev_fw_str,
 	.get_dma_mr = c4iw_get_dma_mr,
 	.get_hw_stats = c4iw_get_mib,
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -433,10 +433,8 @@ err:
 	return -EMSGSIZE;
 }
 
-static int fill_res_mr_entry(struct sk_buff *msg,
-			     struct rdma_restrack_entry *res)
+int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
 {
-	struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 	struct c4iw_dev *dev = mhp->rhp;
 	u32 stag = mhp->attr.stag;
@@ -497,5 +495,4 @@ c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
 	[RDMA_RESTRACK_QP]	= fill_res_qp_entry,
 	[RDMA_RESTRACK_CM_ID]	= fill_res_ep_entry,
 	[RDMA_RESTRACK_CQ]	= fill_res_cq_entry,
-	[RDMA_RESTRACK_MR]	= fill_res_mr_entry,
 };
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6598,8 +6598,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.drain_rq = mlx5_ib_drain_rq,
 	.drain_sq = mlx5_ib_drain_sq,
 	.enable_driver = mlx5_ib_enable_driver,
-	.fill_res_entry = mlx5_ib_fill_res_entry,
-	.fill_stat_entry = mlx5_ib_fill_stat_entry,
+	.fill_res_mr_entry = mlx5_ib_fill_res_mr_entry,
+	.fill_stat_mr_entry = mlx5_ib_fill_stat_mr_entry,
 	.get_dev_fw_str = get_dev_fw_str,
 	.get_dma_mr = mlx5_ib_get_dma_mr,
 	.get_link_layer = mlx5_ib_port_link_layer,
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1375,10 +1375,8 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
 				      u8 *native_port_num);
 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
 				  u8 port_num);
-int mlx5_ib_fill_res_entry(struct sk_buff *msg,
-			   struct rdma_restrack_entry *res);
-int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
-			    struct rdma_restrack_entry *res);
+int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
+int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
 
 extern const struct uapi_definition mlx5_ib_devx_defs[];
 extern const struct uapi_definition mlx5_ib_flow_defs[];
--- a/drivers/infiniband/hw/mlx5/restrack.c
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -8,10 +8,9 @@
 #include <rdma/restrack.h>
 #include "mlx5_ib.h"
 
-static int fill_stat_mr_entry(struct sk_buff *msg,
-			      struct rdma_restrack_entry *res)
+int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg,
+			       struct ib_mr *ibmr)
 {
-	struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
 	struct nlattr *table_attr;
 
@@ -41,10 +40,9 @@ err:
 	return -EMSGSIZE;
 }
 
-static int fill_res_mr_entry(struct sk_buff *msg,
-			     struct rdma_restrack_entry *res)
+int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg,
+			      struct ib_mr *ibmr)
 {
-	struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
 	struct nlattr *table_attr;
 
@@ -70,21 +68,3 @@ err:
 	nla_nest_cancel(msg, table_attr);
 	return -EMSGSIZE;
 }
-
-int mlx5_ib_fill_res_entry(struct sk_buff *msg,
-			   struct rdma_restrack_entry *res)
-{
-	if (res->type == RDMA_RESTRACK_MR)
-		return fill_res_mr_entry(msg, res);
-
-	return 0;
-}
-
-int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
-			    struct rdma_restrack_entry *res)
-{
-	if (res->type == RDMA_RESTRACK_MR)
-		return fill_stat_mr_entry(msg, res);
-
-	return 0;
-}
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2583,6 +2583,7 @@ struct ib_device_ops {
 	 */
 	int (*fill_res_entry)(struct sk_buff *msg,
 			      struct rdma_restrack_entry *entry);
+	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
 
 	/* Device lifecycle callbacks */
 	/*
@@ -2637,8 +2638,7 @@ struct ib_device_ops {
 	 * Allows rdma drivers to add their own restrack attributes
 	 * dumped via 'rdma stat' iproute2 command.
 	 */
-	int (*fill_stat_entry)(struct sk_buff *msg,
-			       struct rdma_restrack_entry *entry);
+	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
 
 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
 	DECLARE_RDMA_OBJ_SIZE(ib_cq);