IB/hfi1: Clean up pin_vector_pages() function
Clean up the pin_vector_pages() function by moving the page-pinning code into a separate function, since it really stands on its own.

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent
624b9ac15c
commit
4c6c9aa6cb
|
@ -1124,11 +1124,53 @@ static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
|
|||
return evict_data.cleared;
|
||||
}
|
||||
|
||||
static int pin_sdma_pages(struct user_sdma_request *req,
|
||||
struct user_sdma_iovec *iovec,
|
||||
struct sdma_mmu_node *node,
|
||||
int npages)
|
||||
{
|
||||
int pinned, cleared;
|
||||
struct page **pages;
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
|
||||
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
||||
if (!pages) {
|
||||
SDMA_DBG(req, "Failed page array alloc");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(pages, node->pages, node->npages * sizeof(*pages));
|
||||
|
||||
npages -= node->npages;
|
||||
retry:
|
||||
if (!hfi1_can_pin_pages(pq->dd, pq->mm,
|
||||
atomic_read(&pq->n_locked), npages)) {
|
||||
cleared = sdma_cache_evict(pq, npages);
|
||||
if (cleared >= npages)
|
||||
goto retry;
|
||||
}
|
||||
pinned = hfi1_acquire_user_pages(pq->mm,
|
||||
((unsigned long)iovec->iov.iov_base +
|
||||
(node->npages * PAGE_SIZE)), npages, 0,
|
||||
pages + node->npages);
|
||||
if (pinned < 0) {
|
||||
kfree(pages);
|
||||
return pinned;
|
||||
}
|
||||
if (pinned != npages) {
|
||||
unpin_vector_pages(pq->mm, pages, node->npages, pinned);
|
||||
return -EFAULT;
|
||||
}
|
||||
kfree(node->pages);
|
||||
node->rb.len = iovec->iov.iov_len;
|
||||
node->pages = pages;
|
||||
atomic_add(pinned, &pq->n_locked);
|
||||
return pinned;
|
||||
}
|
||||
|
||||
static int pin_vector_pages(struct user_sdma_request *req,
|
||||
struct user_sdma_iovec *iovec)
|
||||
{
|
||||
int ret = 0, pinned, npages, cleared;
|
||||
struct page **pages;
|
||||
int ret = 0, pinned, npages;
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
struct sdma_mmu_node *node = NULL;
|
||||
struct mmu_rb_node *rb_node;
|
||||
|
@ -1162,44 +1204,13 @@ static int pin_vector_pages(struct user_sdma_request *req,
|
|||
|
||||
npages = num_user_pages(&iovec->iov);
|
||||
if (node->npages < npages) {
|
||||
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
||||
if (!pages) {
|
||||
SDMA_DBG(req, "Failed page array alloc");
|
||||
ret = -ENOMEM;
|
||||
goto bail;
|
||||
}
|
||||
memcpy(pages, node->pages, node->npages * sizeof(*pages));
|
||||
|
||||
npages -= node->npages;
|
||||
|
||||
retry:
|
||||
if (!hfi1_can_pin_pages(pq->dd, pq->mm,
|
||||
atomic_read(&pq->n_locked), npages)) {
|
||||
cleared = sdma_cache_evict(pq, npages);
|
||||
if (cleared >= npages)
|
||||
goto retry;
|
||||
}
|
||||
pinned = hfi1_acquire_user_pages(pq->mm,
|
||||
((unsigned long)iovec->iov.iov_base +
|
||||
(node->npages * PAGE_SIZE)), npages, 0,
|
||||
pages + node->npages);
|
||||
pinned = pin_sdma_pages(req, iovec, node, npages);
|
||||
if (pinned < 0) {
|
||||
kfree(pages);
|
||||
ret = pinned;
|
||||
goto bail;
|
||||
}
|
||||
if (pinned != npages) {
|
||||
unpin_vector_pages(pq->mm, pages, node->npages,
|
||||
pinned);
|
||||
ret = -EFAULT;
|
||||
goto bail;
|
||||
}
|
||||
kfree(node->pages);
|
||||
node->rb.len = iovec->iov.iov_len;
|
||||
node->pages = pages;
|
||||
node->npages += pinned;
|
||||
npages = node->npages;
|
||||
atomic_add(pinned, &pq->n_locked);
|
||||
}
|
||||
iovec->pages = node->pages;
|
||||
iovec->npages = npages;
|
||||
|
|
Loading…
Reference in New Issue