Merge branch 'drm-prime-vmap' of git://people.freedesktop.org/~airlied/linux
Pull drm prime mmap/vmap code from Dave Airlie:
 "As mentioned previously these are the extra bits of drm that relied on the dma-buf pull to work, the first three just stub out the mmap interface, and the next set provide vmap export to i915/radeon/nouveau and vmap import to udl."

* 'drm-prime-vmap' of git://people.freedesktop.org/~airlied/linux:
  radeon: add radeon prime vmap support.
  nouveau: add vmap support to nouveau prime support
  udl: support vmapping imported dma-bufs
  i915: add dma-buf vmap support for exporting vmapped buffer
  radeon: add stub dma-buf mmap functionality
  nouveau: add stub dma-buf mmap functionality.
  i915: add stub dma-buf mmap callback.
commit 9fdadb2cba
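
The exporter-side callbacks added below (.vmap/.vunmap plus a stub .mmap in struct dma_buf_ops) are not called directly by importers; they are reached through the generic dma-buf helpers. A minimal importer-side sketch of that pairing, which is the pattern udl adopts further down, might look roughly like this (the helper name, error handling and DMA direction are illustrative assumptions, not code from this pull):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: fill an imported dma-buf through its kernel vmapping.
 * CPU access is bracketed with begin/end_cpu_access, as udl does below. */
static int example_fill_imported_buf(struct dma_buf *buf, u8 pattern)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(buf, 0, buf->size, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(buf);		/* dispatches to the exporter's .vmap */
	if (!vaddr) {
		ret = -ENOMEM;
		goto out_end;
	}

	memset(vaddr, pattern, buf->size);	/* CPU access through the mapping */

	dma_buf_vunmap(buf, vaddr);		/* dispatches to the exporter's .vunmap */
out_end:
	dma_buf_end_cpu_access(buf, 0, buf->size, DMA_BIDIRECTIONAL);
	return ret;
}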

drivers/gpu/drm/i915/i915_drv.h
@@ -942,6 +942,9 @@ struct drm_i915_gem_object {

 	/* prime dma-buf support */
 	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */

drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 	}
 }

+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
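
The i915 callback above builds its kernel mapping directly with vmap() over the object's shmem-backed page array (the nouveau and radeon callbacks further down go through ttm_bo_kmap() instead). For reference, a self-contained sketch of the underlying vmap()/vunmap() primitive on an ad-hoc page array; the helper name is made up for illustration:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Allocate 'count' pages, map them into one contiguous kernel address range
 * with vmap(), touch the mapping, then tear everything down again. */
static int example_vmap_pages(unsigned int count)
{
	struct page **pages;
	void *vaddr = NULL;
	unsigned int i;

	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out;
	}

	vaddr = vmap(pages, count, 0, PAGE_KERNEL);	/* same call as i915 above */
	if (vaddr) {
		memset(vaddr, 0, count * PAGE_SIZE);
		vunmap(vaddr);
	}
out:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return vaddr ? 0 : -ENOMEM;
}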

@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)

 }

+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,

@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };

 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
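
The ops table above is consumed by the dma-buf core (drivers/base/dma-buf.c at this point in time); simplified, and not the verbatim core code, the dispatch the new entries rely on looks approximately like this:

/* Simplified sketch of the core helpers that forward to the exporter
 * callbacks wired up above; the real helpers carry extra sanity checks. */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf))
		return NULL;

	if (dmabuf->ops->vmap)
		return dmabuf->ops->vmap(dmabuf);
	return NULL;
}

void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	if (dmabuf->ops->vunmap)
		dmabuf->ops->vunmap(dmabuf, vaddr);
}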

drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@ struct nouveau_bo {

 	struct drm_gem_object *gem;
 	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
 };

 #define nouveau_bo_tile_layout(nvbo) \

drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -61,6 +61,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)

 }

+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (nvbo->vmapping_count) {
+		nvbo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+			  &nvbo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	nvbo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	struct drm_device *dev = nvbo->gem->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	nvbo->vmapping_count--;
+	if (nvbo->vmapping_count == 0) {
+		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static const struct dma_buf_ops nouveau_dmabuf_ops = {
 	.map_dma_buf = nouveau_gem_map_dma_buf,
 	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,

@@ -69,6 +111,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops = {
 	.kmap_atomic = nouveau_gem_kmap_atomic,
 	.kunmap = nouveau_gem_kunmap,
 	.kunmap_atomic = nouveau_gem_kunmap_atomic,
+	.mmap = nouveau_gem_prime_mmap,
+	.vmap = nouveau_gem_prime_vmap,
+	.vunmap = nouveau_gem_prime_vunmap,
 };

 static int

drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@ struct radeon_bo {
 	/* Constant after initialization */
 	struct radeon_device		*rdev;
 	struct drm_gem_object		gem_base;
+
+	struct ttm_bo_kmap_obj		dma_buf_vmap;
+	int				vmapping_count;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)

 }

+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	if (bo->vmapping_count) {
+		bo->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
+	}
+	bo->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct radeon_bo *bo = dma_buf->priv;
+	struct drm_device *dev = bo->rdev->ddev;
+
+	mutex_lock(&dev->struct_mutex);
+	bo->vmapping_count--;
+	if (bo->vmapping_count == 0) {
+		ttm_bo_kunmap(&bo->dma_buf_vmap);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
 const static struct dma_buf_ops radeon_dmabuf_ops = {
 	.map_dma_buf = radeon_gem_map_dma_buf,
 	.unmap_dma_buf = radeon_gem_unmap_dma_buf,

@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops = {
 	.kmap_atomic = radeon_gem_kmap_atomic,
 	.kunmap = radeon_gem_kunmap,
 	.kunmap_atomic = radeon_gem_kunmap_atomic,
+	.mmap = radeon_gem_prime_mmap,
+	.vmap = radeon_gem_prime_vmap,
+	.vunmap = radeon_gem_prime_vunmap,
 };

 static int radeon_prime_create(struct drm_device *dev,
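
The nouveau and radeon implementations above share the same shape: a struct ttm_bo_kmap_obj plus an integer reference count embedded in the buffer object, both guarded by dev->struct_mutex, with the mapping kept alive until the count drops to zero in .vunmap. A hypothetical common helper capturing that pattern (not part of this series; name and factoring invented for illustration) would look roughly like this:

#include <linux/err.h>
#include <ttm/ttm_bo_api.h>	/* ttm_bo_kmap(), struct ttm_bo_kmap_obj; header path varies by kernel version */

/* Hypothetical: take or create a refcounted TTM kernel mapping.
 * The caller must hold the lock protecting 'map' and 'count'
 * (dev->struct_mutex in both drivers above). */
static void *example_ttm_prime_vmap_locked(struct ttm_buffer_object *bo,
					   struct ttm_bo_kmap_obj *map,
					   int *count)
{
	int ret;

	if (*count) {			/* already mapped: just add a reference */
		(*count)++;
		return map->virtual;
	}

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, map);
	if (ret)
		return ERR_PTR(ret);

	*count = 1;
	return map->virtual;
}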

drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	if (!fb->active_16)
 		return 0;

-	if (!fb->obj->vmapping)
-		udl_gem_vmap(fb->obj);
+	if (!fb->obj->vmapping) {
+		ret = udl_gem_vmap(fb->obj);
+		if (ret == -ENOMEM) {
+			DRM_ERROR("failed to vmap fb\n");
+			return 0;
+		}
+		if (!fb->obj->vmapping) {
+			DRM_ERROR("failed to vmapping\n");
+			return 0;
+		}
+	}

 	start_cycles = get_cycles();

drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int ret;

+	if (obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+					       0, obj->base.size, DMA_BIDIRECTIONAL);
+		if (ret)
+			return -EINVAL;
+
+		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+		if (!obj->vmapping)
+			return -ENOMEM;
+		return 0;
+	}
+
 	ret = udl_gem_get_pages(obj, GFP_KERNEL);
 	if (ret)
 		return ret;

@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)

 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+	if (obj->base.import_attach) {
+		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+				       obj->base.size, DMA_BIDIRECTIONAL);
+		return;
+	}
+
 	if (obj->vmapping)
 		vunmap(obj->vmapping);

@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);

-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);

+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->pages)
 		udl_gem_put_pages(obj);