drm/i915: Use pagecache write to prepopulate shmemfs from pwrite-ioctl
Before we instantiate/pin the backing store for our use, we
can prepopulate the shmemfs filp efficiently using a write into the
pagecache. We avoid the penalty of instantiating all the pages, which is
important if the user is just writing to a few and never uses the object
on the GPU, and using a direct write into shmemfs allows us to avoid the
cost of retrieving a page (mostly the clear-before-use, though in theory
we could also curtail swapin) before it is overwritten.
This can be extended later to provide additional specialisation for
backends other than shmemfs. For now it provides a defense against
very large write-only allocations exhausting all of system memory.
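
A minimal userspace sketch of the write-only access pattern this fast
path targets, assuming an i915 device node at /dev/dri/card0 and the
UAPI structs from the installed i915_drm.h (error handling trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/i915_drm.h> /* assumed include path for the i915 UAPI */

int main(void)
{
	struct drm_i915_gem_create create = { .size = 64 << 20 }; /* 64MiB */
	struct drm_i915_gem_pwrite pwrite = { 0 };
	char buf[4096];
	int fd;

	fd = open("/dev/dri/card0", O_RDWR); /* assumed device node */
	if (fd < 0)
		return 1;

	/* Allocate a large GEM object that is never used on the GPU. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;

	memset(buf, 0xaa, sizeof(buf));

	/* Write a single page in the middle of the object. With this
	 * patch only that page is populated in the shmemfs pagecache;
	 * the other pages of the object remain unallocated. */
	pwrite.handle = create.handle;
	pwrite.offset = 32 << 20;
	pwrite.size = sizeof(buf);
	pwrite.data_ptr = (uint64_t)(uintptr_t)buf;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
		return 1;

	close(fd);
	return 0;
}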
v2: Spelling fixes.
Fixes: fe115628d5 ("drm/i915: Implement pwrite without struct-mutex")
References: https://bugs.freedesktop.org/show_bug.cgi?id=99107
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: <stable@vger.kernel.org> # v4.10+
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170307120338.7277-2-chris@chris-wilson.co.uk
@@ -1452,6 +1452,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

+	ret = -ENODEV;
+	if (obj->ops->pwrite)
+		ret = obj->ops->pwrite(obj, args);
+	if (ret != -ENODEV)
+		goto err;
+
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
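
Note the dispatch idiom above: ret is primed to -ENODEV, so an object
whose ops lack a pwrite hook, or a hook that declines the request, falls
through to the existing wait-and-copy paths below, while any other return
value (success included) completes the ioctl via the err label.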
@@ -2578,6 +2584,75 @@ err_unlock:
 	goto out_unlock;
 }

+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+			   const struct drm_i915_gem_pwrite *arg)
+{
+	struct address_space *mapping = obj->base.filp->f_mapping;
+	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+	u64 remain, offset;
+	unsigned int pg;
+
+	/* Before we instantiate/pin the backing store for our use, we
+	 * can prepopulate the shmemfs filp efficiently using a write into
+	 * the pagecache. We avoid the penalty of instantiating all the
+	 * pages, important if the user is just writing to a few and never
+	 * uses the object on the GPU, and using a direct write into shmemfs
+	 * allows us to avoid the cost of retrieving a page (either swapin
+	 * or clearing-before-use) before it is overwritten.
+	 */
+	if (READ_ONCE(obj->mm.pages))
+		return -ENODEV;
+
+	/* Before the pages are instantiated, the object is treated as being
+	 * in the CPU domain. The pages will be clflushed as required before
+	 * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation, corruption will ensue -
+	 * that is userspace's prerogative!
+	 */
+
+	remain = arg->size;
+	offset = arg->offset;
+	pg = offset_in_page(offset);
+
+	do {
+		unsigned int len, unwritten;
+		struct page *page;
+		void *data, *vaddr;
+		int err;
+
+		len = PAGE_SIZE - pg;
+		if (len > remain)
+			len = remain;
+
+		err = pagecache_write_begin(obj->base.filp, mapping,
+					    offset, len, 0,
+					    &page, &data);
+		if (err < 0)
+			return err;
+
+		vaddr = kmap(page);
+		unwritten = copy_from_user(vaddr + pg, user_data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(obj->base.filp, mapping,
+					  offset, len, len - unwritten,
+					  page, data);
+		if (err < 0)
+			return err;
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= len;
+		user_data += len;
+		offset += len;
+		pg = 0;
+	} while (remain);
+
+	return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
 	return (i915_gem_context_is_bannable(ctx) &&
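
pagecache_write_begin()/pagecache_write_end() go through the mapping's
write_begin/write_end address_space operations, i.e. the same path a
write(2) to the shmemfs file would take: shmem allocates (or swaps in)
the page and write_end marks it dirty, all without i915 ever calling
get_pages. When the object is later pinned for GPU use, get_pages simply
finds the prepopulated pages already sitting in the pagecache.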
@@ -3994,8 +4069,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
+
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
+
+	.pwrite = i915_gem_object_pwrite_gtt,
 };

 struct drm_i915_gem_object *
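
Only the standard shmemfs-backed object ops gain the hook here; other
backends (e.g. userptr or dmabuf-imported objects) leave .pwrite NULL and
continue to take the pre-existing pwrite paths.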
@@ -56,6 +56,9 @@ struct drm_i915_gem_object_ops {
 	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

+	int (*pwrite)(struct drm_i915_gem_object *,
+		      const struct drm_i915_gem_pwrite *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };
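
To illustrate the contract the new member creates, here is a hypothetical
backend hook; every example_* name below is invented for illustration and
corresponds to no real i915 code:

/* Hypothetical backend specialisation of the pwrite hook: return
 * -ENODEV to decline (the ioctl falls back to the generic GTT/CPU
 * paths), 0 to report the write fully serviced, or any other errno
 * to fail the ioctl outright. */
static int example_object_pwrite(struct drm_i915_gem_object *obj,
				 const struct drm_i915_gem_pwrite *arg)
{
	if (!example_can_fast_write(obj)) /* invented predicate */
		return -ENODEV;

	return example_copy_from_user(obj, arg); /* invented helper */
}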