RDMA/umem: Get rid of struct ib_umem.odp_data
This no longer has any use. We can use container_of() to get to the
umem_odp, and a simple flag to indicate if this is an ODP MR. Remove the
few remaining references to it.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 597ecc5a09
parent 41b4deeaa1
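For context, the to_ib_umem_odp() helper used throughout this diff is the container_of() downcast introduced by the parent commit in include/rdma/ib_umem_odp.h. It is expected to look along these lines (shown here for reference, not part of this diff):

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	/* Recover the enclosing ib_umem_odp from its embedded ib_umem. */
	return container_of(umem, struct ib_umem_odp, umem);
}

The downcast is only valid when the umem really was allocated as part of an ib_umem_odp, which is exactly what the new is_odp bit records.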
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -112,7 +112,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
 		if (!umem)
 			return ERR_PTR(-ENOMEM);
-		umem->odp_data = to_ib_umem_odp(umem);
+		umem->is_odp = 1;
 	} else {
 		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
 		if (!umem)
@@ -243,7 +243,7 @@ EXPORT_SYMBOL(ib_umem_get);
 static void __ib_umem_release_tail(struct ib_umem *umem)
 {
 	mmdrop(umem->owning_mm);
-	if (umem->odp_data)
+	if (umem->is_odp)
 		kfree(to_ib_umem_odp(umem));
 	else
 		kfree(umem);
@@ -268,7 +268,7 @@ void ib_umem_release(struct ib_umem *umem)
 {
 	struct ib_ucontext *context = umem->context;
 
-	if (umem->odp_data) {
+	if (umem->is_odp) {
 		ib_umem_odp_release(to_ib_umem_odp(umem));
 		__ib_umem_release_tail(umem);
 		return;
@@ -306,7 +306,7 @@ int ib_umem_page_count(struct ib_umem *umem)
 	int n;
 	struct scatterlist *sg;
 
-	if (umem->odp_data)
+	if (umem->is_odp)
 		return ib_umem_num_pages(umem);
 
 	n = 0;
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -291,6 +291,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
 	umem->address = addr;
 	umem->page_shift = PAGE_SHIFT;
 	umem->writable = 1;
+	umem->is_odp = 1;
 
 	mutex_init(&odp_data->umem_mutex);
 	init_completion(&odp_data->notifier_completion);
@@ -319,8 +320,6 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
 				      &context->no_private_counters);
 	up_write(&context->umem_rwsem);
 
-	umem->odp_data = odp_data;
-
 	return odp_data;
 
 out_page_list:
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,7 +57,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	int entry;
 	unsigned long page_shift = umem->page_shift;
 
-	if (umem->odp_data) {
+	if (umem->is_odp) {
 		*ncont = ib_umem_page_count(umem);
 		*count = *ncont << (page_shift - PAGE_SHIFT);
 		*shift = page_shift;
@@ -152,14 +152,13 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	struct scatterlist *sg;
 	int entry;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	const bool odp = umem->odp_data != NULL;
-
-	if (odp) {
+	if (umem->is_odp) {
 		WARN_ON(shift != 0);
 		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
 
 		for (i = 0; i < num_pages; ++i) {
-			dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+			dma_addr_t pa =
+				to_ib_umem_odp(umem)->dma_list[offset + i];
 
 			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
 		}
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -98,7 +98,7 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 static void update_odp_mr(struct mlx5_ib_mr *mr)
 {
-	if (mr->umem->odp_data) {
+	if (mr->umem->is_odp) {
 		/*
 		 * This barrier prevents the compiler from moving the
 		 * setting of umem->odp_data->private to point to our
@@ -107,7 +107,7 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
 		 * handle invalidations.
 		 */
 		smp_wmb();
-		mr->umem->odp_data->private = mr;
+		to_ib_umem_odp(mr->umem)->private = mr;
 		/*
 		 * Make sure we will see the new
 		 * umem->odp_data->private value in the invalidation
@@ -1624,15 +1624,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct ib_umem *umem = mr->umem;
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (umem && umem->odp_data) {
+	if (umem && umem->is_odp) {
+		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
+
 		/* Prevent new page faults from succeeding */
 		mr->live = 0;
 		/* Wait for all running page-fault handlers to finish. */
 		synchronize_srcu(&dev->mr_srcu);
 		/* Destroy all page mappings */
-		if (umem->odp_data->page_list)
-			mlx5_ib_invalidate_range(to_ib_umem_odp(umem),
-						 ib_umem_start(umem),
+		if (umem_odp->page_list)
+			mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
 						 ib_umem_end(umem));
 		else
 			mlx5_ib_free_implicit_mr(mr);
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -371,11 +371,12 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
 	struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
 	struct ib_umem_odp *odp, *result = NULL;
+	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	u64 addr = io_virt & MLX5_IMR_MTT_MASK;
 	int nentries = 0, start_idx = 0, ret;
 	struct mlx5_ib_mr *mtt;
 
-	mutex_lock(&mr->umem->odp_data->umem_mutex);
+	mutex_lock(&odp_mr->umem_mutex);
 	odp = odp_lookup(ctx, addr, 1, mr);
 
 	mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
@@ -388,14 +389,14 @@ next_mr:
 	} else {
 		odp = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
 		if (IS_ERR(odp)) {
-			mutex_unlock(&mr->umem->odp_data->umem_mutex);
+			mutex_unlock(&odp_mr->umem_mutex);
 			return ERR_CAST(odp);
 		}
 
 		mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
 					mr->access_flags);
 		if (IS_ERR(mtt)) {
-			mutex_unlock(&mr->umem->odp_data->umem_mutex);
+			mutex_unlock(&odp_mr->umem_mutex);
 			ib_umem_release(&odp->umem);
 			return ERR_CAST(mtt);
 		}
@@ -433,7 +434,7 @@ next_mr:
 		}
 	}
 
-	mutex_unlock(&mr->umem->odp_data->umem_mutex);
+	mutex_unlock(&odp_mr->umem_mutex);
 	return result;
 }
 
@@ -498,6 +499,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
+	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	u64 access_mask = ODP_READ_ALLOWED_BIT;
 	int npages = 0, page_shift, np;
 	u64 start_idx, page_mask;
@@ -506,7 +508,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	size_t size;
 	int ret;
 
-	if (!mr->umem->odp_data->page_list) {
+	if (!odp_mr->page_list) {
 		odp = implicit_mr_get_data(mr, io_virt, bcnt);
 
 		if (IS_ERR(odp))
@@ -514,7 +516,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 		mr = odp->private;
 
 	} else {
-		odp = mr->umem->odp_data;
+		odp = odp_mr;
 	}
 
 next_mr:
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -46,10 +46,10 @@ struct ib_umem {
 	size_t		length;
 	unsigned long	address;
 	int		page_shift;
-	int		writable;
-	int		hugetlb;
+	u32 writable : 1;
+	u32 hugetlb : 1;
+	u32 is_odp : 1;
 	struct work_struct	work;
-	struct ib_umem_odp	*odp_data;
 	struct sg_table sg_head;
 	int		nmap;
 	int		npages;
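To see why kzalloc(sizeof(struct ib_umem_odp)) plus a one-bit flag is enough to replace the odp_data pointer, here is a minimal userspace sketch of the same embed-plus-flag pattern. All names here are hypothetical stand-ins for illustration, not kernel code:

/*
 * Sketch: the ODP struct embeds the base struct, a flag records which
 * variant was allocated, and container_of() recovers the outer struct
 * from a pointer to the embedded one at zero cost.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct umem {
	unsigned int is_odp : 1;	/* set only for the larger allocation */
};

struct umem_odp {
	struct umem umem;		/* base struct embedded first */
	void *dma_list;			/* ODP-only state lives out here */
};

static struct umem_odp *to_umem_odp(struct umem *umem)
{
	return container_of(umem, struct umem_odp, umem);
}

int main(void)
{
	/* Allocate the larger struct but hand out the embedded base. */
	struct umem_odp *odp = calloc(1, sizeof(*odp));
	struct umem *umem = &odp->umem;

	umem->is_odp = 1;

	/* The flag tells us the downcast is safe. */
	if (umem->is_odp)
		assert(to_umem_odp(umem) == odp);

	/* Free via the outer struct, as __ib_umem_release_tail now does. */
	free(odp);
	return 0;
}

Because the base struct is embedded, the flag is the only extra state needed; the pointer it replaces carried no information beyond "this umem is an ODP one".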