RDMA/hns: Fix sg offset non-zero issue
When running perftest many times, the system reports a BUG as follows:

  BUG: Bad rss-counter state mm:(____ptrval____) idx:0 val:-1
  BUG: Bad rss-counter state mm:(____ptrval____) idx:1 val:1

We tested with different kernel versions and found that the problem started
with commit d10bcf947a ("RDMA/umem: Combine contiguous PAGE_SIZE regions in
SGEs"). Since that commit, sg->offset is always 0 when sg_set_page() is
called in ib_umem_get(), and drivers are not allowed to change the sgl;
otherwise a bad page descriptor is obtained when unfolding SGEs in
__ib_umem_release(), because sg_page_count() returns the wrong result while
sgl->offset is not 0.

However, the current hns driver uses the sgl in a non-standard way: it
modifies sg->offset after calling ib_umem_get(), which makes the
for_each_sg_page iterator walk past the wrong number of pages.

Fix this by correcting the non-standard sgl usage in hns_roce_db_map_user().

Fixes: d10bcf947a ("RDMA/umem: Combine contiguous PAGE_SIZE regions in SGEs")
Fixes: 0425e3e6e0 ("RDMA/hns: Support flush cqe for hip08 in kernel space")
Link: https://lore.kernel.org/r/1562808737-45723-1-git-send-email-oulijun@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d5121ffebc
commit 60c3becfd1
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 			 struct ib_udata *udata, unsigned long virt,
 			 struct hns_roce_db *db)
 {
+	unsigned long page_addr = virt & PAGE_MASK;
 	struct hns_roce_user_db_page *page;
+	unsigned int offset;
 	int ret = 0;
 
 	mutex_lock(&context->page_mutex);
 
 	list_for_each_entry(page, &context->page_list, list)
-		if (page->user_virt == (virt & PAGE_MASK))
+		if (page->user_virt == page_addr)
 			goto found;
 
 	page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 	}
 
 	refcount_set(&page->refcount, 1);
-	page->user_virt = (virt & PAGE_MASK);
-	page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+	page->user_virt = page_addr;
+	page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
 	if (IS_ERR(page->umem)) {
 		ret = PTR_ERR(page->umem);
 		kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 	list_add(&page->list, &context->page_list);
 
 found:
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) +
-		  (virt & ~PAGE_MASK);
-	page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
-	db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+	offset = virt - page_addr;
+	db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+	db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
 	db->u.user_page = page;
 	refcount_inc(&page->refcount);
 
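[Editor's note] For reference, a minimal standalone sketch of the arithmetic the fixed code performs: the page-internal offset is added to the values stored in db->dma and db->virt_addr, while the sgl built by ib_umem_get() keeps offset == 0. The virtual and DMA addresses below are hypothetical example values:

/*
 * Sketch of the fixed addressing pattern, with hypothetical values.
 * The sgl set up by ib_umem_get() is left untouched; the doorbell's
 * offset inside its page is carried by the caller instead.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long virt   = 0x7f1234561234UL;   /* hypothetical user VA  */
        unsigned long sg_dma = 0x82340000UL;        /* hypothetical DMA addr */

        unsigned long page_addr = virt & PAGE_MASK; /* 0x7f1234561000 */
        unsigned long offset    = virt - page_addr; /* 0x234          */

        /* Equivalent of: db->dma = sg_dma_address(sgl) + offset; */
        unsigned long db_dma = sg_dma + offset;

        printf("page_addr = %#lx, offset = %#lx, db->dma = %#lx\n",
               page_addr, offset, db_dma);
        return 0;
}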