RDMA/hns: Use for_each_sg_dma_page iterator on umem SGL
Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped SGL and get the page DMA address. This avoids the extra loop to iterate pages in the SGE when for_each_sg iterator is used. Additionally, purge umem->page_shift usage in the driver as it's only relevant for ODP MRs. Use system page size and shift instead. Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
43fae91276
commit
3856ec5527
|
@ -1871,9 +1871,8 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|||
unsigned long mtpt_idx)
|
||||
{
|
||||
struct hns_roce_v1_mpt_entry *mpt_entry;
|
||||
struct scatterlist *sg;
|
||||
struct sg_dma_page_iter sg_iter;
|
||||
u64 *pages;
|
||||
int entry;
|
||||
int i;
|
||||
|
||||
/* MPT filled into mailbox buf */
|
||||
|
@ -1928,8 +1927,8 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
|
|||
return -ENOMEM;
|
||||
|
||||
i = 0;
|
||||
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
|
||||
pages[i] = ((u64)sg_dma_address(sg)) >> 12;
|
||||
for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
|
||||
pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
|
||||
|
||||
/* Directly record to MTPT table firstly 7 entry */
|
||||
if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
|
||||
|
|
|
@ -2084,12 +2084,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
|
|||
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
|
||||
struct hns_roce_mr *mr)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
struct sg_dma_page_iter sg_iter;
|
||||
u64 page_addr;
|
||||
u64 *pages;
|
||||
int i, j;
|
||||
int len;
|
||||
int entry;
|
||||
int i;
|
||||
|
||||
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
|
||||
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
|
||||
|
@ -2102,17 +2100,14 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
|
|||
return -ENOMEM;
|
||||
|
||||
i = 0;
|
||||
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
|
||||
len = sg_dma_len(sg) >> PAGE_SHIFT;
|
||||
for (j = 0; j < len; ++j) {
|
||||
page_addr = sg_dma_address(sg) +
|
||||
(j << mr->umem->page_shift);
|
||||
pages[i] = page_addr >> 6;
|
||||
/* Record the first 2 entry directly to MTPT table */
|
||||
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
|
||||
goto found;
|
||||
i++;
|
||||
}
|
||||
for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
|
||||
page_addr = sg_page_iter_dma_address(&sg_iter);
|
||||
pages[i] = page_addr >> 6;
|
||||
|
||||
/* Record the first 2 entry directly to MTPT table */
|
||||
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
|
||||
goto found;
|
||||
i++;
|
||||
}
|
||||
found:
|
||||
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
|
||||
|
|
|
@ -976,12 +976,11 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
|
|||
struct hns_roce_mtt *mtt, struct ib_umem *umem)
|
||||
{
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct scatterlist *sg;
|
||||
struct sg_dma_page_iter sg_iter;
|
||||
unsigned int order;
|
||||
int i, k, entry;
|
||||
int npage = 0;
|
||||
int ret = 0;
|
||||
int len;
|
||||
int i;
|
||||
u64 page_addr;
|
||||
u64 *pages;
|
||||
u32 bt_page_size;
|
||||
|
@ -1014,29 +1013,25 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
|
|||
|
||||
i = n = 0;
|
||||
|
||||
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
|
||||
len = sg_dma_len(sg) >> PAGE_SHIFT;
|
||||
for (k = 0; k < len; ++k) {
|
||||
page_addr =
|
||||
sg_dma_address(sg) + (k << umem->page_shift);
|
||||
if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
|
||||
if (page_addr & ((1 << mtt->page_shift) - 1)) {
|
||||
dev_err(dev, "page_addr 0x%llx is not page_shift %d alignment!\n",
|
||||
page_addr, mtt->page_shift);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
pages[i++] = page_addr;
|
||||
}
|
||||
npage++;
|
||||
if (i == bt_page_size / sizeof(u64)) {
|
||||
ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
|
||||
pages);
|
||||
if (ret)
|
||||
goto out;
|
||||
n += i;
|
||||
i = 0;
|
||||
for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
|
||||
page_addr = sg_page_iter_dma_address(&sg_iter);
|
||||
if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
|
||||
if (page_addr & ((1 << mtt->page_shift) - 1)) {
|
||||
dev_err(dev,
|
||||
"page_addr 0x%llx is not page_shift %d alignment!\n",
|
||||
page_addr, mtt->page_shift);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
pages[i++] = page_addr;
|
||||
}
|
||||
npage++;
|
||||
if (i == bt_page_size / sizeof(u64)) {
|
||||
ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
|
||||
if (ret)
|
||||
goto out;
|
||||
n += i;
|
||||
i = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1052,10 +1047,8 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
|
|||
struct hns_roce_mr *mr,
|
||||
struct ib_umem *umem)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i = 0, j = 0, k;
|
||||
int entry;
|
||||
int len;
|
||||
struct sg_dma_page_iter sg_iter;
|
||||
int i = 0, j = 0;
|
||||
u64 page_addr;
|
||||
u32 pbl_bt_sz;
|
||||
|
||||
|
@ -1063,27 +1056,22 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
|
|||
return 0;
|
||||
|
||||
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
|
||||
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
|
||||
len = sg_dma_len(sg) >> PAGE_SHIFT;
|
||||
for (k = 0; k < len; ++k) {
|
||||
page_addr = sg_dma_address(sg) +
|
||||
(k << umem->page_shift);
|
||||
for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
|
||||
page_addr = sg_page_iter_dma_address(&sg_iter);
|
||||
if (!hr_dev->caps.pbl_hop_num) {
|
||||
mr->pbl_buf[i++] = page_addr >> 12;
|
||||
} else if (hr_dev->caps.pbl_hop_num == 1) {
|
||||
mr->pbl_buf[i++] = page_addr;
|
||||
} else {
|
||||
if (hr_dev->caps.pbl_hop_num == 2)
|
||||
mr->pbl_bt_l1[i][j] = page_addr;
|
||||
else if (hr_dev->caps.pbl_hop_num == 3)
|
||||
mr->pbl_bt_l2[i][j] = page_addr;
|
||||
|
||||
if (!hr_dev->caps.pbl_hop_num) {
|
||||
mr->pbl_buf[i++] = page_addr >> 12;
|
||||
} else if (hr_dev->caps.pbl_hop_num == 1) {
|
||||
mr->pbl_buf[i++] = page_addr;
|
||||
} else {
|
||||
if (hr_dev->caps.pbl_hop_num == 2)
|
||||
mr->pbl_bt_l1[i][j] = page_addr;
|
||||
else if (hr_dev->caps.pbl_hop_num == 3)
|
||||
mr->pbl_bt_l2[i][j] = page_addr;
|
||||
|
||||
j++;
|
||||
if (j >= (pbl_bt_sz / 8)) {
|
||||
i++;
|
||||
j = 0;
|
||||
}
|
||||
j++;
|
||||
if (j >= (pbl_bt_sz / 8)) {
|
||||
i++;
|
||||
j = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -640,19 +640,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
|
|||
}
|
||||
|
||||
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
|
||||
page_shift = PAGE_SHIFT;
|
||||
if (hr_dev->caps.mtt_buf_pg_sz) {
|
||||
npages = (ib_umem_page_count(hr_qp->umem) +
|
||||
(1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
|
||||
(1 << hr_dev->caps.mtt_buf_pg_sz);
|
||||
page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
|
||||
(1 << hr_dev->caps.mtt_buf_pg_sz);
|
||||
page_shift += hr_dev->caps.mtt_buf_pg_sz;
|
||||
ret = hns_roce_mtt_init(hr_dev, npages,
|
||||
page_shift,
|
||||
&hr_qp->mtt);
|
||||
} else {
|
||||
ret = hns_roce_mtt_init(hr_dev,
|
||||
ib_umem_page_count(hr_qp->umem),
|
||||
hr_qp->umem->page_shift,
|
||||
&hr_qp->mtt);
|
||||
ib_umem_page_count(hr_qp->umem),
|
||||
page_shift, &hr_qp->mtt);
|
||||
}
|
||||
if (ret) {
|
||||
dev_err(dev, "hns_roce_mtt_init error for create qp\n");
|
||||
|
|
Loading…
Reference in New Issue