[media] media: vb2: Convert vb2_dma_sg_get_userptr() to use frame vector

Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
Jan Kara, 2015-07-13 11:55:47 -03:00; committed by Mauro Carvalho Chehab
parent 21fb0cb7ec
commit 3336c24f25
1 changed file with 15 additions and 80 deletions
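
For context when reading the hunks below: the patch replaces the open-coded get_user_pages()/follow_pfn() handling in the vb2 DMA-SG backend with the frame vector helpers added earlier in this series. A rough sketch of the helper API as used here (signatures paraphrased from that series, not part of this diff):

	#include <linux/mm.h>			/* struct frame_vector, frame_vector_*() */
	#include <media/videobuf2-memops.h>	/* vb2_create_framevec(), vb2_destroy_framevec() */

	/*
	 * Pin the user pages (or record the PFNs of a VM_IO/VM_PFNMAP mapping)
	 * backing [start, start + length); returns ERR_PTR() on failure.
	 */
	struct frame_vector *vb2_create_framevec(unsigned long start,
						 unsigned long length,
						 bool write);

	/* Undo vb2_create_framevec(): release page references and free the vector. */
	void vb2_destroy_framevec(struct frame_vector *vec);

	/* Number of frames actually stored in the vector. */
	unsigned int frame_vector_count(struct frame_vector *vec);

	/*
	 * Page-array view of the vector; returns ERR_PTR() if the vector holds
	 * raw PFNs that cannot be converted to struct page pointers.
	 */
	struct page **frame_vector_pages(struct frame_vector *vec);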


@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
 	struct device *dev;
 	void *vaddr;
 	struct page **pages;
+	struct frame_vector *vec;
 	int offset;
 	enum dma_data_direction dma_dir;
 	struct sg_table sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
 	unsigned int num_pages;
 	atomic_t refcount;
 	struct vb2_vmarea_handler handler;
-	struct vm_area_struct *vma;
 
 	struct dma_buf_attachment *db_attach;
 };
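
After these two hunks the buffer descriptor carries the frame vector instead of a copied VMA; roughly (unrelated fields elided, field alignment not reproduced from the file):

	struct vb2_dma_sg_buf {
		struct device *dev;
		void *vaddr;
		struct page **pages;		/* now points into the frame vector */
		struct frame_vector *vec;	/* replaces the old vma reference */
		int offset;
		enum dma_data_direction dma_dir;
		struct sg_table sg_table;
		/* ... */
		struct dma_buf_attachment *db_attach;
	};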
@@ -225,25 +225,17 @@ static void vb2_dma_sg_finish(void *buf_priv)
 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 }
 
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
-	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 				    unsigned long size,
 				    enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
-	unsigned long first, last;
-	int num_pages_from_user;
-	struct vm_area_struct *vma;
 	struct sg_table *sgt;
 	DEFINE_DMA_ATTRS(attrs);
+	struct frame_vector *vec;
 
 	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
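
The removed vma_is_io() special case is now hidden behind vb2_create_framevec(): get_vaddr_frames() copes with both ordinary mappings and VM_IO/VM_PFNMAP mappings itself. A sketch of what that helper does, assuming it matches the version introduced earlier in this series (details approximate, not part of this diff):

	struct frame_vector *vb2_create_framevec(unsigned long start,
						 unsigned long length, bool write)
	{
		unsigned long first, last, nr;
		struct frame_vector *vec;
		int ret;

		first = start >> PAGE_SHIFT;
		last = (start + length - 1) >> PAGE_SHIFT;
		nr = last - first + 1;
		vec = frame_vector_create(nr);
		if (!vec)
			return ERR_PTR(-ENOMEM);
		/* Pins pages, or records PFNs for VM_IO/VM_PFNMAP mappings. */
		ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
		if (ret < 0)
			goto out_destroy;
		/* Only a complete set of frames is useful for a vb2 buffer. */
		if (ret != nr) {
			ret = -EFAULT;
			goto out_release;
		}
		return vec;

	out_release:
		put_vaddr_frames(vec);
	out_destroy:
		frame_vector_destroy(vec);
		return ERR_PTR(ret);
	}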
@@ -254,63 +246,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
+	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+	if (IS_ERR(vec))
+		goto userptr_fail_pfnvec;
+	buf->vec = vec;
 
-	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
-	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-	buf->num_pages = last - first + 1;
-
-	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
-			     GFP_KERNEL);
-	if (!buf->pages)
-		goto userptr_fail_alloc_pages;
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, vaddr);
-	if (!vma) {
-		dprintk(1, "no vma for address %lu\n", vaddr);
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma->vm_end < vaddr + size) {
-		dprintk(1, "vma at %lu is too small for %lu bytes\n",
-			vaddr, size);
-		goto userptr_fail_find_vma;
-	}
-
-	buf->vma = vb2_get_vma(vma);
-	if (!buf->vma) {
-		dprintk(1, "failed to copy vma\n");
-		goto userptr_fail_find_vma;
-	}
-
-	if (vma_is_io(buf->vma)) {
-		for (num_pages_from_user = 0;
-		     num_pages_from_user < buf->num_pages;
-		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
-			unsigned long pfn;
-
-			if (follow_pfn(vma, vaddr, &pfn)) {
-				dprintk(1, "no page for address %lu\n", vaddr);
-				break;
-			}
-			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
-		}
-	} else
-		num_pages_from_user = get_user_pages(current, current->mm,
-					     vaddr & PAGE_MASK,
-					     buf->num_pages,
-					     buf->dma_dir == DMA_FROM_DEVICE,
-					     1, /* force */
-					     buf->pages,
-					     NULL);
-
-	up_read(&current->mm->mmap_sem);
-	if (num_pages_from_user != buf->num_pages)
-		goto userptr_fail_get_user_pages;
+	buf->pages = frame_vector_pages(vec);
+	if (IS_ERR(buf->pages))
+		goto userptr_fail_sgtable;
+	buf->num_pages = frame_vector_count(vec);
 
 	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
 			buf->num_pages, buf->offset, size, 0))
-		goto userptr_fail_alloc_table_from_pages;
+		goto userptr_fail_sgtable;
 
 	sgt = &buf->sg_table;
 	/*
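
Read as a whole rather than as diff fragments, the new acquisition path now boils down to the following (assembled from the hunks above; surrounding setup and the later DMA mapping step elided, comments added for this sketch):

	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	/* One call pins the user memory and remembers how to release it. */
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	/* Borrow the page array from the frame vector; no separate allocation. */
	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
				      buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;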
@@ -326,19 +274,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 
 userptr_fail_map:
 	sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
-	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-		buf->num_pages, num_pages_from_user);
-	if (!vma_is_io(buf->vma))
-		while (--num_pages_from_user >= 0)
-			put_page(buf->pages[num_pages_from_user]);
-	down_read(&current->mm->mmap_sem);
-	vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
-	up_read(&current->mm->mmap_sem);
-	kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+	vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
 	kfree(buf);
 	return NULL;
 }
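
The unwind order follows the usual goto convention: each label undoes only what was set up before the failing step. Roughly, with comments added for this sketch:

	userptr_fail_map:		/* dma_map_sg_attrs() failed */
		sg_free_table(&buf->sg_table);
	userptr_fail_sgtable:		/* frame_vector_pages() or sg_alloc_table_from_pages() failed */
		vb2_destroy_framevec(vec);	/* drops the references taken by vb2_create_framevec() */
	userptr_fail_pfnvec:		/* vb2_create_framevec() failed */
		kfree(buf);
		return NULL;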
@@ -366,11 +304,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	while (--i >= 0) {
 		if (buf->dma_dir == DMA_FROM_DEVICE)
 			set_page_dirty_lock(buf->pages[i]);
-		if (!vma_is_io(buf->vma))
-			put_page(buf->pages[i]);
 	}
-	kfree(buf->pages);
-	vb2_put_vma(buf->vma);
+	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
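
On the release side, dirtying captured frames still happens in the driver loop, while dropping the page references moves into vb2_destroy_framevec() (expected to boil down to put_vaddr_frames()/frame_vector_destroy(); that detail comes from the helper series, not this hunk). The resulting teardown is roughly:

	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	/* Releases the pinned pages (or PFN refs) and frees the vector. */
	vb2_destroy_framevec(buf->vec);
	kfree(buf);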