RDMA/mana_ib: Fix a bug when the PF indicates more entries for registering memory on first packet
When registering memory in a large chunk that doesn't fit into a single PF
message, the PF may return GDMA_STATUS_MORE_ENTRIES on the first message if
there are more messages needed for registering more chunks.
Fix the VF to make it process the correct return code.
Fixes: 0266a17763 ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
Link: https://lore.kernel.org/r/1676507522-21018-1-git-send-email-longli@linuxonhyperv.com
Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent: 72a0362744
commit: 89d42b8c85
@@ -249,7 +249,8 @@ static int
 mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 			    struct gdma_context *gc,
 			    struct gdma_create_dma_region_req *create_req,
-			    size_t num_pages, mana_handle_t *gdma_region)
+			    size_t num_pages, mana_handle_t *gdma_region,
+			    u32 expected_status)
 {
 	struct gdma_create_dma_region_resp create_resp = {};
 	unsigned int create_req_msg_size;
@@ -261,7 +262,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 
 	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
 				   sizeof(create_resp), &create_resp);
-	if (err || create_resp.hdr.status) {
+	if (err || create_resp.hdr.status != expected_status) {
 		ibdev_dbg(&dev->ib_dev,
 			  "Failed to create DMA region: %d, 0x%x\n",
 			  err, create_resp.hdr.status);
@@ -372,14 +373,21 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 
 	page_addr_list = create_req->page_addr_list;
 	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
+		u32 expected_status = 0;
+
 		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
 		if (tail < num_pages_to_handle)
 			continue;
 
+		if (num_pages_processed + num_pages_to_handle <
+		    num_pages_total)
+			expected_status = GDMA_STATUS_MORE_ENTRIES;
+
 		if (!num_pages_processed) {
 			/* First create message */
 			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
-							  tail, gdma_region);
+							  tail, gdma_region,
+							  expected_status);
 			if (err)
 				goto out;
 
@@ -392,14 +400,8 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 			page_addr_list = add_req->page_addr_list;
 		} else {
 			/* Subsequent create messages */
-			u32 expected_s = 0;
-
-			if (num_pages_processed + num_pages_to_handle <
-			    num_pages_total)
-				expected_s = GDMA_STATUS_MORE_ENTRIES;
-
 			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
-							expected_s);
+							expected_status);
 			if (err)
 				break;
 		}
 
Loading…
Reference in New Issue