RDMA/efa: Use rdma block iterator in chunk list creation

When creating the chunk list, the rdma_for_each_block() iterator is used
to iterate over the payload in EFA_CHUNK_PAYLOAD_SIZE (device defined)
strides.
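
The change replaces open-coded stride arithmetic with the core block
iterator. A minimal sketch of the pattern, not part of this patch, is shown
below; the helper name fill_block_addresses(), its parameters and the
dma_pages output buffer are hypothetical and only illustrate how
rdma_for_each_block() and rdma_block_iter_dma_address() walk an SG list in
fixed-size strides:

  #include <rdma/ib_verbs.h>

  /* Collect the DMA address of every block_size-sized block in an SG list. */
  static void fill_block_addresses(struct scatterlist *sgl, int nents,
                                   u64 *dma_pages, unsigned long block_size)
  {
          struct ib_block_iter biter;
          unsigned int i = 0;

          /*
           * Each iteration advances by one block_size stride, even when a
           * single SG entry covers several blocks.
           */
          rdma_for_each_block(sgl, &biter, nents, block_size)
                  dma_pages[i++] = rdma_block_iter_dma_address(&biter);
  }

Because the iterator itself splits SG entries that span several strides,
the nested loop over payloads within each SG entry is no longer needed.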

Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: Yossi Leybovich <sleybo@amazon.com>
Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Author: Gal Pressman <galpress@amazon.com> 2019-05-28 15:46:17 +03:00
Committed by: Jason Gunthorpe
parent e0e3f39759
commit 4d50e084c5
1 changed file with 10 additions and 13 deletions

@@ -1085,14 +1085,14 @@ err:
  */
 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
 {
-	unsigned int entry, payloads_in_sg, chunk_list_size, chunk_idx, payload_idx;
 	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
 	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
 	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
+	unsigned int chunk_list_size, chunk_idx, payload_idx;
 	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
 	struct efa_com_ctrl_buff_info *ctrl_buf;
 	u64 *cur_chunk_buf, *prev_chunk_buf;
-	struct scatterlist *sg;
+	struct ib_block_iter biter;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -1126,18 +1126,15 @@ static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
 	chunk_idx = 0;
 	payload_idx = 0;
 	cur_chunk_buf = chunk_list->chunks[0].buf;
-	for_each_sg(pages_sgl, sg, sg_dma_cnt, entry) {
-		payloads_in_sg = sg_dma_len(sg) >> EFA_CHUNK_PAYLOAD_SHIFT;
-		for (i = 0; i < payloads_in_sg; i++) {
-			cur_chunk_buf[payload_idx++] =
-				(sg_dma_address(sg) & ~(EFA_CHUNK_PAYLOAD_SIZE - 1)) +
-				(EFA_CHUNK_PAYLOAD_SIZE * i);
+	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
+			    EFA_CHUNK_PAYLOAD_SIZE) {
+		cur_chunk_buf[payload_idx++] =
+			rdma_block_iter_dma_address(&biter);
 
-			if (payload_idx == EFA_PTRS_PER_CHUNK) {
-				chunk_idx++;
-				cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
-				payload_idx = 0;
-			}
+		if (payload_idx == EFA_PTRS_PER_CHUNK) {
+			chunk_idx++;
+			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
+			payload_idx = 0;
 		}
 	}
 