IB/hfi1: Prevent unpinning of wrong pages
The routine used by the SDMA cache to handle already-cached nodes can extend an existing node. In its error handling code, the routine will unpin pages when not all pages of the buffer extension were pinned. There was a bug in that part of the routine which would mistakenly unpin pages from the original set rather than the newly pinned pages. This commit fixes that bug by offsetting the page array to the proper place, pointing at the beginning of the newly pinned pages.

Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 849e3e9398
parent de82bdff62
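To make the error path concrete, here is a minimal user-space sketch of the pattern, under assumed illustrative names (pin_pages, release_pages and node_npages are stand-ins, not the driver's helpers; the real release goes through hfi1_release_user_pages as shown in the diff below). A cached node already holds node_npages pinned pages; an extension tries to pin npages more into the same array, and on a partial pin the error path must release only the newly pinned tail, starting at index node_npages, instead of starting at index 0.

/*
 * Minimal sketch of the fixed error path (illustrative names only;
 * the driver releases pages through hfi1_release_user_pages()).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page;                            /* opaque stand-in for struct page */

/* Simulate pinning: only want - 1 of the requested pages get pinned. */
static int pin_pages(struct page **pages, unsigned start, unsigned want)
{
        unsigned got = want - 1;

        for (unsigned i = 0; i < got; i++)
                pages[start + i] = (struct page *)(uintptr_t)(start + i + 1);
        return (int)got;
}

/* Mirrors the fixed unpin_vector_pages(): release from 'start' onward. */
static void release_pages(struct page **pages, unsigned start, unsigned npages)
{
        for (unsigned i = 0; i < npages; i++)
                printf("releasing page slot %u\n", start + i);
        (void)pages;
}

int main(void)
{
        unsigned node_npages = 4;       /* pages already cached on the node */
        unsigned npages = 3;            /* pages the extension wants to add */
        struct page **pages = calloc(node_npages + npages, sizeof(*pages));
        int pinned;

        if (!pages)
                return EXIT_FAILURE;

        /* ... the node's original node_npages entries are copied in here ... */

        pinned = pin_pages(pages, node_npages, npages);
        if (pinned != (int)npages) {
                /*
                 * Buggy version: release_pages(pages, 0, pinned) would drop
                 * pages that still belong to the cached node.  The fix skips
                 * the original pages and releases only the newly pinned tail.
                 */
                release_pages(pages, node_npages, pinned);
                free(pages);
                return EXIT_FAILURE;
        }

        free(pages);
        return EXIT_SUCCESS;
}

In the driver, the same offset shows up as pages + start inside unpin_vector_pages(): the error path in pin_vector_pages() passes start = node->npages to drop only the new pins, while the teardown in sdma_rb_remove() passes start = 0 to release every page of the node.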
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
                            struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+                              unsigned);
 static int check_header_template(struct user_sdma_request *,
                                 struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,
@@ -1110,7 +1111,8 @@ retry:
                        goto bail;
                }
                if (pinned != npages) {
-                       unpin_vector_pages(current->mm, pages, pinned);
+                       unpin_vector_pages(current->mm, pages, node->npages,
+                                          pinned);
                        ret = -EFAULT;
                        goto bail;
                }
@@ -1150,9 +1152,9 @@ bail:
 }
 
 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-                              unsigned npages)
+                              unsigned start, unsigned npages)
 {
-       hfi1_release_user_pages(mm, pages, npages, 0);
+       hfi1_release_user_pages(mm, pages + start, npages, 0);
        kfree(pages);
 }
 
@@ -1566,7 +1568,8 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
         * prevent a deadlock when hfi1_release_user_pages() attempts to
         * take the mmap_sem, which the MMU notifier has already taken.
         */
-       unpin_vector_pages(mm ? NULL : current->mm, node->pages, node->npages);
+       unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
+                          node->npages);
        /*
         * If called by the MMU notifier, we have to adjust the pinned
         * page count ourselves.