// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
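
/*
 * CPU fault handler for mmap'd objects: translate the faulting address
 * into an offset from the object's physical base and insert that pfn
 * into the faulting VMA.
 */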
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
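
/*
 * Release an object: free whichever backing store it has (system pages,
 * a node in the linear region, or an imported dma-buf attachment) and
 * then the GEM object itself.
 */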
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
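
/*
 * Provide backing memory for an object: small allocations (typically
 * cursors) come from the page allocator, anything larger is carved out
 * of the driver's linear (physically contiguous) region.  Either way
 * the memory is zeroed before use.
 */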
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
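
/*
 * Return a kernel mapping for the object, creating one with ioremap_wc()
 * for linear objects; page-backed objects already have their ->addr set.
 */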
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
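
/*
 * Allocate a GEM object with no shmem backing; the caller attaches the
 * backing store later (linear memory or an imported dma-buf).
 */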
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
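
/*
 * Allocate a shmem-backed GEM object; its pages may come from highmem
 * and are marked reclaimable.
 */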
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
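/*
 * Create a dumb buffer: derive the pitch and size from the requested
 * geometry, back the object with linear memory and return a handle.
 */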
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
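/*
 * Create a shmem-backed object of the requested size and return a
 * handle for it.
 */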
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
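
/*
 * Copy data from userspace into a kernel-mapped object and, if the
 * object has an update callback, notify it so the new contents take
 * effect.
 */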
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
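/*
 * Build and DMA-map a scatterlist for an exported buffer: shmem objects
 * are pinned page by page, page- and linear-backed objects are described
 * as a single contiguous entry.
 */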
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
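
/*
 * Undo the mapping above: DMA-unmap the table (unless the object is
 * linear-backed, which was never DMA-mapped) and drop the page
 * references taken for shmem objects.
 */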
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}
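
/*
 * CPU access through the dma-buf interface (kmap/mmap) is not
 * supported; the stubs below refuse such requests.
 */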
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};
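
/* Export a GEM object as a dma-buf using the ops above. */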
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
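
/*
 * Import a dma-buf: if it is one of our own, just take another reference
 * on the underlying GEM object; otherwise attach to the buffer.  Mapping
 * is deferred to armada_gem_map_import().
 */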
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
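
/*
 * Map an imported dma-buf for DMA and check that it is a single
 * contiguous region large enough to back the object.
 */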
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}