// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

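/*
 * Illustrative sketch (not part of this file): a driver that needs
 * per-object state can embed struct drm_gem_shmem_object in its own type and
 * return it from &drm_driver.gem_create_object; drivers without special
 * needs get drm_gem_shmem_funcs filled in by __drm_gem_shmem_create() below.
 * The "my_*" names here are hypothetical:
 *
 *	struct my_bo {
 *		struct drm_gem_shmem_object base;
 *		// driver-private state goes here
 *	};
 *
 *	static struct drm_gem_object *my_gem_create_object(struct drm_device *dev,
 *							   size_t size)
 *	{
 *		struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *		return &bo->base.base;
 *	}
 */
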
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

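/*
 * Minimal usage sketch (hypothetical caller, not from this file): allocate a
 * buffer and translate the error pointer the way callers are expected to:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */
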
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

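/*
 * Sketch of the expected pairing (illustrative only): every successful
 * drm_gem_shmem_get_pages() must be balanced by exactly one
 * drm_gem_shmem_put_pages(), since the pages are only released when
 * pages_use_count drops to zero:
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	// ... access shmem->pages ...
 *	drm_gem_shmem_put_pages(shmem);
 */
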
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

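/*
 * Usage sketch (hypothetical caller): map the buffer into the kernel address
 * space, access it through the iosys_map, and drop the mapping again. The
 * same &struct iosys_map filled by drm_gem_shmem_vmap() must be passed back
 * to drm_gem_shmem_vunmap():
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	iosys_map_memset(&map, 0, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);
 */
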
static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

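/*
 * Sketch of how a driver shrinker might use the madvise/purge helpers
 * (illustrative only; "my_shrinker_list" and the accounting are
 * hypothetical). A driver typically marks objects DONTNEED through
 * drm_gem_shmem_madvise() from its madvise ioctl and links them on a list
 * via &drm_gem_shmem_object.madv_list, then reclaims from a shrinker scan:
 *
 *	list_for_each_entry(shmem, &my_shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */
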
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

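/*
 * Worked example: for a hypothetical 640x480 dumb buffer with bpp = 32,
 * min_pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes. With pitch and size
 * zeroed by userspace, the helper then sets args->size =
 * PAGE_ALIGN(2560 * 480) = 1228800 bytes (exactly 300 pages of 4 KiB).
 */
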
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

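/*
 * Usage sketch (hypothetical driver code): fetch the dma-mapped sg table once
 * and walk it to program hardware page tables; the table stays cached in
 * shmem->sgt until the object is freed or purged. "my_hw_map" is a
 * placeholder for driver-specific mapping code:
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sgl;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sgtable_dma_sg(sgt, sgl, i)
 *		my_hw_map(sg_dma_address(sgl), sg_dma_len(sgl));
 */
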
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

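/*
 * Wiring sketch (illustrative): a driver built on these helpers typically
 * hooks the import and dumb-buffer paths up in its &struct drm_driver, either
 * explicitly or via the DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h>:
 *
 *	static const struct drm_driver my_driver = {
 *		...
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */
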
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");