IB: Modify ib_create_mr API

Replace ib_create_mr() with ib_alloc_mr(), which takes an explicit
memory-region type (mr_type) and a maximum scatter/gather entry count
(max_num_sg) instead of an ib_mr_init_attr struct.
Update all existing callers accordingly.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Sagi Grimberg 2015-07-30 10:32:35 +03:00 committed by Doug Ledford
parent 8b91ffc1cf
commit 9bee178b4f
7 changed files with 58 additions and 46 deletions

View File

@ -1235,15 +1235,32 @@ int ib_dereg_mr(struct ib_mr *mr)
} }
EXPORT_SYMBOL(ib_dereg_mr); EXPORT_SYMBOL(ib_dereg_mr);
struct ib_mr *ib_create_mr(struct ib_pd *pd, /**
struct ib_mr_init_attr *mr_init_attr) * ib_alloc_mr() - Allocates a memory region
* @pd: protection domain associated with the region
* @mr_type: memory region type
* @max_num_sg: maximum sg entries available for registration.
*
* Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
* For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
* max_num_sg * used_page_size.
*
*/
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg)
{ {
struct ib_mr *mr; struct ib_mr *mr;
if (!pd->device->create_mr) if (pd->device->alloc_mr) {
return ERR_PTR(-ENOSYS); mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
} else {
mr = pd->device->create_mr(pd, mr_init_attr); if (mr_type != IB_MR_TYPE_MEM_REG ||
!pd->device->alloc_fast_reg_mr)
return ERR_PTR(-ENOSYS);
mr = pd->device->alloc_fast_reg_mr(pd, max_num_sg);
}
if (!IS_ERR(mr)) { if (!IS_ERR(mr)) {
mr->device = pd->device; mr->device = pd->device;
@ -1255,7 +1272,7 @@ struct ib_mr *ib_create_mr(struct ib_pd *pd,
return mr; return mr;
} }
EXPORT_SYMBOL(ib_create_mr); EXPORT_SYMBOL(ib_alloc_mr);
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{ {

View File

@ -1502,7 +1502,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
dev->ib_dev.process_mad = mlx5_ib_process_mad; dev->ib_dev.process_mad = mlx5_ib_process_mad;
dev->ib_dev.create_mr = mlx5_ib_create_mr; dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr; dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;

View File

@ -573,8 +573,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
int npages, int zap); int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr); int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd, struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr); enum ib_mr_type mr_type,
u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len); int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,

View File

@ -1244,14 +1244,15 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
return 0; return 0;
} }
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd, struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr) enum ib_mr_type mr_type,
u32 max_num_sg)
{ {
struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in; struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr; struct mlx5_ib_mr *mr;
int access_mode, err; int access_mode, err;
int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4); int ndescs = roundup(max_num_sg, 4);
mr = kzalloc(sizeof(*mr), GFP_KERNEL); mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) if (!mr)
@ -1267,9 +1268,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
in->seg.xlt_oct_size = cpu_to_be32(ndescs); in->seg.xlt_oct_size = cpu_to_be32(ndescs);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
access_mode = MLX5_ACCESS_MODE_MTT;
if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) { if (mr_type == IB_MR_TYPE_MEM_REG) {
access_mode = MLX5_ACCESS_MODE_MTT;
in->seg.log2_page_size = PAGE_SHIFT;
} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
u32 psv_index[2]; u32 psv_index[2];
in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) | in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
@ -1295,6 +1298,10 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
mr->sig->sig_err_exists = false; mr->sig->sig_err_exists = false;
/* Next UMR, Arm SIGERR */ /* Next UMR, Arm SIGERR */
++mr->sig->sigerr_count; ++mr->sig->sigerr_count;
} else {
mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
err = -EINVAL;
goto err_free_in;
} }
in->seg.flags = MLX5_PERM_UMR_EN | access_mode; in->seg.flags = MLX5_PERM_UMR_EN | access_mode;

View File

@ -284,9 +284,7 @@ iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
struct fast_reg_descriptor *desc) struct fast_reg_descriptor *desc)
{ {
struct iser_pi_context *pi_ctx = NULL; struct iser_pi_context *pi_ctx = NULL;
struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2, int ret;
.flags = IB_MR_SIGNATURE_EN};
int ret = 0;
desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
if (!desc->pi_ctx) if (!desc->pi_ctx)
@ -309,7 +307,7 @@ iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
} }
desc->reg_indicators |= ISER_PROT_KEY_VALID; desc->reg_indicators |= ISER_PROT_KEY_VALID;
pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
if (IS_ERR(pi_ctx->sig_mr)) { if (IS_ERR(pi_ctx->sig_mr)) {
ret = PTR_ERR(pi_ctx->sig_mr); ret = PTR_ERR(pi_ctx->sig_mr);
goto sig_mr_failure; goto sig_mr_failure;

View File

@ -508,7 +508,6 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
struct ib_device *device, struct ib_device *device,
struct ib_pd *pd) struct ib_pd *pd)
{ {
struct ib_mr_init_attr mr_init_attr;
struct pi_context *pi_ctx; struct pi_context *pi_ctx;
int ret; int ret;
@ -536,10 +535,7 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
} }
desc->ind |= ISERT_PROT_KEY_VALID; desc->ind |= ISERT_PROT_KEY_VALID;
memset(&mr_init_attr, 0, sizeof(mr_init_attr)); pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
mr_init_attr.max_reg_descriptors = 2;
mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
if (IS_ERR(pi_ctx->sig_mr)) { if (IS_ERR(pi_ctx->sig_mr)) {
isert_err("Failed to allocate signature enabled mr err=%ld\n", isert_err("Failed to allocate signature enabled mr err=%ld\n",
PTR_ERR(pi_ctx->sig_mr)); PTR_ERR(pi_ctx->sig_mr));

View File

@ -557,20 +557,18 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
*/ */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
enum ib_mr_create_flags {
IB_MR_SIGNATURE_EN = 1,
};
/** /**
* ib_mr_init_attr - Memory region init attributes passed to routine * enum ib_mr_type - memory region type
* ib_create_mr. * @IB_MR_TYPE_MEM_REG: memory region that is used for
* @max_reg_descriptors: max number of registration descriptors that * normal registration
* may be used with registration work requests. * @IB_MR_TYPE_SIGNATURE: memory region that is used for
* @flags: MR creation flags bit mask. * signature operations (data-integrity
* capable regions)
*/ */
struct ib_mr_init_attr { enum ib_mr_type {
int max_reg_descriptors; IB_MR_TYPE_MEM_REG,
u32 flags; IB_MR_TYPE_SIGNATURE,
}; };
/** /**
@ -1671,8 +1669,9 @@ struct ib_device {
int (*query_mr)(struct ib_mr *mr, int (*query_mr)(struct ib_mr *mr,
struct ib_mr_attr *mr_attr); struct ib_mr_attr *mr_attr);
int (*dereg_mr)(struct ib_mr *mr); int (*dereg_mr)(struct ib_mr *mr);
struct ib_mr * (*create_mr)(struct ib_pd *pd, struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr); enum ib_mr_type mr_type,
u32 max_num_sg);
struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
int max_page_list_len); int max_page_list_len);
struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
@ -2817,15 +2816,9 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
*/ */
int ib_dereg_mr(struct ib_mr *mr); int ib_dereg_mr(struct ib_mr *mr);
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
/** enum ib_mr_type mr_type,
* ib_create_mr - Allocates a memory region that may be used for u32 max_num_sg);
* signature handover operations.
* @pd: The protection domain associated with the region.
* @mr_init_attr: memory region init attributes.
*/
struct ib_mr *ib_create_mr(struct ib_pd *pd,
struct ib_mr_init_attr *mr_init_attr);
/** /**
* ib_alloc_fast_reg_mr - Allocates memory region usable with the * ib_alloc_fast_reg_mr - Allocates memory region usable with the