RDMA/nes: Use for_each_sg_dma_page iterator for umem SGL

Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra inner loop that was
needed to iterate over the pages within each SGE when the for_each_sg
iterator is used.

Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.

Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Shiraz, Saleem authored 2019-02-12 11:01:14 -06:00; committed by Jason Gunthorpe
parent 36d577089d
commit 52a572e9f7
1 changed file with 91 additions and 133 deletions
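For context, the new iterator yields one position per PAGE_SIZE page of the DMA-mapped SGL and splits multi-page SGEs internally, which is what lets the driver drop its nested page loop. Below is a minimal sketch of the pattern, assuming a DMA-mapped umem; write_pbl_entries() is a hypothetical stand-in for the driver's PBL writer, not a function in this patch:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Sketch: walk the umem one PAGE_SIZE page at a time and emit each
 * page's DMA address as a low/high little-endian pair. */
static void write_pbl_entries(struct ib_umem *umem, __le32 *pbl)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t addr;

	for_each_sg_dma_page(umem->sg_head.sgl, &dma_iter, umem->nmap, 0) {
		addr = sg_page_iter_dma_address(&dma_iter);
		*pbl++ = cpu_to_le32(lower_32_bits(addr));
		*pbl++ = cpu_to_le32(upper_32_bits(addr));
	}
}

Compare with the removed code in the hunks below, which computed chunk_pages = sg_dma_len(sg) >> 12 and then looped over page_index within each SGE.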

drivers/infiniband/hw/nes/nes_verbs.c

@@ -2098,7 +2098,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	struct ib_mr *ibmr = ERR_PTR(-EINVAL);
-	struct scatterlist *sg;
+	struct sg_dma_page_iter dma_iter;
 	struct nes_ucontext *nes_ucontext;
 	struct nes_pbl *nespbl;
 	struct nes_mr *nesmr;
@@ -2106,10 +2106,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct nes_mem_reg_req req;
 	struct nes_vpbl vpbl;
 	struct nes_root_vpbl root_vpbl;
-	int entry, page_index;
+	int page_index;
 	int page_count = 0;
 	int err, pbl_depth = 0;
-	int chunk_pages;
 	int ret;
 	u32 stag;
 	u32 stag_index = 0;
@@ -2121,7 +2120,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	u16 pbl_count;
 	u8 single_page = 1;
 	u8 stag_key;
-	int first_page = 1;
 
 	region = ib_umem_get(udata, start, length, acc, 0);
 	if (IS_ERR(region)) {
@@ -2172,127 +2170,99 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		}
 		nesmr->region = region;
 
-		for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-			if (sg_dma_address(sg) & ~PAGE_MASK) {
-				ib_umem_release(region);
-				nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
-				nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
-					  (unsigned int) sg_dma_address(sg));
-				ibmr = ERR_PTR(-EINVAL);
-				kfree(nesmr);
-				goto reg_user_mr_err;
-			}
-
-			if (!sg_dma_len(sg)) {
-				ib_umem_release(region);
-				nes_free_resource(nesadapter, nesadapter->allocated_mrs,
-						  stag_index);
-				nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
-				ibmr = ERR_PTR(-EINVAL);
-				kfree(nesmr);
-				goto reg_user_mr_err;
-			}
-
-			region_length += sg_dma_len(sg);
-			chunk_pages = sg_dma_len(sg) >> 12;
-			region_length -= skip_pages << 12;
-			for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
-				skip_pages = 0;
-
-				if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
-					goto enough_pages;
-				if ((page_count&0x01FF) == 0) {
-					if (page_count >= 1024 * 512) {
-						ib_umem_release(region);
-						nes_free_resource(nesadapter,
-								  nesadapter->allocated_mrs, stag_index);
-						kfree(nesmr);
-						ibmr = ERR_PTR(-E2BIG);
-						goto reg_user_mr_err;
-					}
-					if (root_pbl_index == 1) {
-						root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
-								8192, &root_vpbl.pbl_pbase);
-						nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
-							  root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
-						if (!root_vpbl.pbl_vbase) {
-							ib_umem_release(region);
-							pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
-									    vpbl.pbl_pbase);
-							nes_free_resource(nesadapter, nesadapter->allocated_mrs,
-									  stag_index);
-							kfree(nesmr);
-							ibmr = ERR_PTR(-ENOMEM);
-							goto reg_user_mr_err;
-						}
-						root_vpbl.leaf_vpbl = kcalloc(1024,
-									      sizeof(*root_vpbl.leaf_vpbl),
-									      GFP_KERNEL);
-						if (!root_vpbl.leaf_vpbl) {
-							ib_umem_release(region);
-							pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
-									    root_vpbl.pbl_pbase);
-							pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
-									    vpbl.pbl_pbase);
-							nes_free_resource(nesadapter, nesadapter->allocated_mrs,
-									  stag_index);
-							kfree(nesmr);
-							ibmr = ERR_PTR(-ENOMEM);
-							goto reg_user_mr_err;
-						}
-						root_vpbl.pbl_vbase[0].pa_low =
-								cpu_to_le32((u32)vpbl.pbl_pbase);
-						root_vpbl.pbl_vbase[0].pa_high =
-								cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
-						root_vpbl.leaf_vpbl[0] = vpbl;
-					}
-					vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
-							&vpbl.pbl_pbase);
-					nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
-						  vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
-					if (!vpbl.pbl_vbase) {
-						ib_umem_release(region);
-						nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
-						ibmr = ERR_PTR(-ENOMEM);
-						kfree(nesmr);
-						goto reg_user_mr_err;
-					}
-					if (1 <= root_pbl_index) {
-						root_vpbl.pbl_vbase[root_pbl_index].pa_low =
-								cpu_to_le32((u32)vpbl.pbl_pbase);
-						root_vpbl.pbl_vbase[root_pbl_index].pa_high =
-								cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
-						root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
-					}
-					root_pbl_index++;
-					cur_pbl_index = 0;
-				}
-				if (single_page) {
-					if (page_count != 0) {
-						if ((last_dma_addr+4096) !=
-								(sg_dma_address(sg)+
-								(page_index*4096)))
-							single_page = 0;
-						last_dma_addr = sg_dma_address(sg)+
-								(page_index*4096);
-					} else {
-						first_dma_addr = sg_dma_address(sg)+
-								(page_index*4096);
-						last_dma_addr = first_dma_addr;
-					}
-				}
-
-				vpbl.pbl_vbase[cur_pbl_index].pa_low =
-						cpu_to_le32((u32)(sg_dma_address(sg)+
-						(page_index*4096)));
-				vpbl.pbl_vbase[cur_pbl_index].pa_high =
-						cpu_to_le32((u32)((((u64)(sg_dma_address(sg)+
-						(page_index*4096))) >> 32)));
-				cur_pbl_index++;
-				page_count++;
-			}
-		}
-
-		enough_pages:
+		for_each_sg_dma_page (region->sg_head.sgl, &dma_iter, region->nmap, 0) {
+			region_length += PAGE_SIZE;
+			region_length -= skip_pages << 12;
+			skip_pages = 0;
+			if ((page_count != 0) && (page_count << 12) - (ib_umem_offset(region) & (4096 - 1)) >= region->length)
+				goto enough_pages;
+			if ((page_count & 0x01FF) == 0) {
+				if (page_count >= 1024 * 512) {
+					ib_umem_release(region);
+					nes_free_resource(nesadapter,
+							  nesadapter->allocated_mrs, stag_index);
+					kfree(nesmr);
+					ibmr = ERR_PTR(-E2BIG);
+					goto reg_user_mr_err;
+				}
+				if (root_pbl_index == 1) {
+					root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+							8192, &root_vpbl.pbl_pbase);
+					nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+						  root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+					if (!root_vpbl.pbl_vbase) {
+						ib_umem_release(region);
+						pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+								    vpbl.pbl_pbase);
+						nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+								  stag_index);
+						kfree(nesmr);
+						ibmr = ERR_PTR(-ENOMEM);
+						goto reg_user_mr_err;
+					}
+					root_vpbl.leaf_vpbl = kcalloc(1024,
+								      sizeof(*root_vpbl.leaf_vpbl),
+								      GFP_KERNEL);
+					if (!root_vpbl.leaf_vpbl) {
+						ib_umem_release(region);
+						pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+								    root_vpbl.pbl_pbase);
+						pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+								    vpbl.pbl_pbase);
+						nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+								  stag_index);
+						kfree(nesmr);
+						ibmr = ERR_PTR(-ENOMEM);
+						goto reg_user_mr_err;
+					}
+					root_vpbl.pbl_vbase[0].pa_low =
+							cpu_to_le32((u32)vpbl.pbl_pbase);
+					root_vpbl.pbl_vbase[0].pa_high =
+							cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+					root_vpbl.leaf_vpbl[0] = vpbl;
+				}
+				vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+						&vpbl.pbl_pbase);
+				nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
+					  vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
+				if (!vpbl.pbl_vbase) {
+					ib_umem_release(region);
+					nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+					ibmr = ERR_PTR(-ENOMEM);
+					kfree(nesmr);
+					goto reg_user_mr_err;
+				}
+				if (1 <= root_pbl_index) {
+					root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+							cpu_to_le32((u32)vpbl.pbl_pbase);
+					root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+							cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+					root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+				}
+				root_pbl_index++;
+				cur_pbl_index = 0;
+			}
+			if (single_page) {
+				if (page_count != 0) {
+					if ((last_dma_addr + 4096) != sg_page_iter_dma_address(&dma_iter))
+						single_page = 0;
+					last_dma_addr = sg_page_iter_dma_address(&dma_iter);
+				} else {
+					first_dma_addr = sg_page_iter_dma_address(&dma_iter);
+					last_dma_addr = first_dma_addr;
+				}
+			}
+
+			vpbl.pbl_vbase[cur_pbl_index].pa_low =
+					cpu_to_le32((u32)(sg_page_iter_dma_address(&dma_iter)));
+			vpbl.pbl_vbase[cur_pbl_index].pa_high =
+					cpu_to_le32((u32)((u64)(sg_page_iter_dma_address(&dma_iter)) >> 32));
+			cur_pbl_index++;
+			page_count++;
+		}
+
+enough_pages:
 	nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
 			" stag_key=0x%08x\n",
 			stag_index, driver_key, stag_key);
@@ -2334,7 +2304,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			ibmr = ERR_PTR(-ENOMEM);
 		}
 
-		reg_user_mr_err:
+reg_user_mr_err:
 		/* free the resources */
 		if (root_pbl_index == 1) {
 			pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
@@ -2401,26 +2371,14 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		  nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
 		  (void *) nespbl->pbl_vbase, nespbl->user_base);
 
-	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-		chunk_pages = sg_dma_len(sg) >> 12;
-		chunk_pages += (sg_dma_len(sg) & (4096-1)) ? 1 : 0;
-		if (first_page) {
-			nespbl->page = sg_page(sg);
-			first_page = 0;
-		}
-
-		for (page_index = 0; page_index < chunk_pages; page_index++) {
-			((__le32 *)pbl)[0] = cpu_to_le32((u32)
-					(sg_dma_address(sg)+
-					(page_index*4096)));
-			((__le32 *)pbl)[1] = cpu_to_le32(((u64)
-					(sg_dma_address(sg)+
-					(page_index*4096)))>>32);
-			nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
-				  (unsigned long long)*pbl,
-				  le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
-			pbl++;
-		}
+	nespbl->page = sg_page(region->sg_head.sgl);
+	for_each_sg_dma_page(region->sg_head.sgl, &dma_iter, region->nmap, 0) {
+		((__le32 *)pbl)[0] = cpu_to_le32((u32)(sg_page_iter_dma_address(&dma_iter)));
+		((__le32 *)pbl)[1] = cpu_to_le32(((u64)(sg_page_iter_dma_address(&dma_iter)))>>32);
+		nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+			  (unsigned long long)*pbl,
+			  le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
+		pbl++;
 	}
 
 	if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
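The subtlest part of the conversion is the single_page tracking in the first loop above: with a per-page iterator, the buffer is physically contiguous exactly when every page's DMA address equals the previous address plus one page (the driver hardcodes 4096 because a nes PBL entry covers 4KB). A standalone sketch of that check, assuming the same includes as the sketch near the top; umem_is_contiguous() is a hypothetical name, not part of this patch:

/* Returns true when all pages of the umem are DMA-contiguous,
 * mirroring the single_page/first_dma_addr/last_dma_addr logic above. */
static bool umem_is_contiguous(struct ib_umem *umem)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t cur, last = 0;
	bool first = true;

	for_each_sg_dma_page(umem->sg_head.sgl, &dma_iter, umem->nmap, 0) {
		cur = sg_page_iter_dma_address(&dma_iter);
		if (!first && last + PAGE_SIZE != cur)
			return false;	/* gap found: a multi-entry PBL is needed */
		first = false;
		last = cur;
	}
	return true;
}

When the check holds, the driver can program the single first_dma_addr into the hardware instead of building a page list.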