2014-12-12 04:13:08 +08:00
|
|
|
/*
|
|
|
|
* Copyright © 2014 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "i915_drv.h"
|
2015-04-07 23:20:34 +08:00
|
|
|
#include "i915_gem_batch_pool.h"
|
2014-12-12 04:13:08 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* DOC: batch pool
|
|
|
|
*
|
|
|
|
* In order to submit batch buffers as 'secure', the software command parser
|
|
|
|
* must ensure that a batch buffer cannot be modified after parsing. It does
|
|
|
|
* this by copying the user provided batch buffer contents to a kernel owned
|
|
|
|
* buffer from which the hardware will actually execute, and by carefully
|
|
|
|
* managing the address space bindings for such buffers.
|
|
|
|
*
|
|
|
|
* The batch pool framework provides a mechanism for the driver to manage a
|
|
|
|
* set of scratch buffers to use for this purpose. The framework can be
|
|
|
|
 * extended to support other use cases should they arise.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_gem_batch_pool_init() - initialize a batch buffer pool
|
2016-08-04 23:32:19 +08:00
|
|
|
* @engine: the associated request submission engine
|
2014-12-12 04:13:08 +08:00
|
|
|
* @pool: the batch buffer pool
|
|
|
|
*/
|
2016-08-04 23:32:19 +08:00
|
|
|
void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
|
2014-12-12 04:13:08 +08:00
|
|
|
struct i915_gem_batch_pool *pool)
|
|
|
|
{
|
2015-04-07 23:20:38 +08:00
|
|
|
int n;
|
|
|
|
|
2016-08-04 23:32:19 +08:00
|
|
|
pool->engine = engine;
|
2015-04-07 23:20:38 +08:00
|
|
|
|
|
|
|
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
|
|
|
|
INIT_LIST_HEAD(&pool->cache_list[n]);
|
2014-12-12 04:13:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i915_gem_batch_pool_fini() - clean up a batch buffer pool
|
|
|
|
* @pool: the pool to clean up
|
|
|
|
*
|
|
|
|
* Note: Callers must hold the struct_mutex.
|
|
|
|
*/
|
|
|
|
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
|
|
|
|
{
|
2015-04-07 23:20:38 +08:00
|
|
|
int n;
|
|
|
|
|
2016-08-04 23:32:19 +08:00
|
|
|
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
|
2014-12-12 04:13:08 +08:00
|
|
|
|
2015-04-07 23:20:38 +08:00
|
|
|
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
|
2016-07-26 19:01:53 +08:00
|
|
|
struct drm_i915_gem_object *obj, *next;
|
2014-12-12 04:13:08 +08:00
|
|
|
|
2016-07-26 19:01:53 +08:00
|
|
|
list_for_each_entry_safe(obj, next,
|
|
|
|
&pool->cache_list[n],
|
|
|
|
batch_pool_link)
|
2016-10-28 20:58:29 +08:00
|
|
|
__i915_gem_object_release_unless_active(obj);
|
2016-07-26 19:01:53 +08:00
|
|
|
|
|
|
|
INIT_LIST_HEAD(&pool->cache_list[n]);
|
2014-12-12 04:13:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2015-04-07 23:20:35 +08:00
|
|
|
* i915_gem_batch_pool_get() - allocate a buffer from the pool
|
2014-12-12 04:13:08 +08:00
|
|
|
* @pool: the batch buffer pool
|
|
|
|
* @size: the minimum desired size of the returned buffer
|
|
|
|
*
|
2015-04-07 23:20:35 +08:00
|
|
|
* Returns an inactive buffer from @pool with at least @size bytes,
|
|
|
|
* with the pages pinned. The caller must i915_gem_object_unpin_pages()
|
|
|
|
* on the returned object.
|
2014-12-12 04:13:08 +08:00
|
|
|
*
|
|
|
|
* Note: Callers must hold the struct_mutex
|
|
|
|
*
|
2015-04-07 23:20:35 +08:00
|
|
|
* Return: the buffer object or an error pointer
|
2014-12-12 04:13:08 +08:00
|
|
|
*/
|
|
|
|
struct drm_i915_gem_object *
|
|
|
|
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
|
|
|
|
size_t size)
|
|
|
|
{
|
|
|
|
struct drm_i915_gem_object *obj = NULL;
|
2016-10-28 20:58:30 +08:00
|
|
|
struct drm_i915_gem_object *tmp;
|
2015-04-07 23:20:38 +08:00
|
|
|
struct list_head *list;
|
2016-10-28 20:58:30 +08:00
|
|
|
int n, ret;
|
2014-12-12 04:13:08 +08:00
|
|
|
|
2016-08-04 23:32:19 +08:00
|
|
|
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
|
2014-12-12 04:13:08 +08:00
|
|
|
|
2015-04-07 23:20:38 +08:00
|
|
|
/* Compute a power-of-two bucket, but throw everything greater than
|
|
|
|
* 16KiB into the same bucket: i.e. the the buckets hold objects of
|
|
|
|
* (1 page, 2 pages, 4 pages, 8+ pages).
|
|
|
|
*/
|
|
|
|
n = fls(size >> PAGE_SHIFT) - 1;
|
|
|
|
if (n >= ARRAY_SIZE(pool->cache_list))
|
|
|
|
n = ARRAY_SIZE(pool->cache_list) - 1;
|
|
|
|
list = &pool->cache_list[n];
|
|
|
|
|
2016-10-28 20:58:30 +08:00
|
|
|
list_for_each_entry(tmp, list, batch_pool_link) {
|
2015-04-07 23:20:36 +08:00
|
|
|
/* The batches are strictly LRU ordered */
|
drm/i915: Move GEM activity tracking into a common struct reservation_object
In preparation to support many distinct timelines, we need to expand the
activity tracking on the GEM object to handle more than just a request
per engine. We already use the struct reservation_object on the dma-buf
to handle many fence contexts, so integrating that into the GEM object
itself is the preferred solution. (For example, we can now share the same
reservation_object between every consumer/producer using this buffer and
skip the manual import/export via dma-buf.)
v2: Reimplement busy-ioctl (by walking the reservation object), postpone
the ABI change for another day. Similarly use the reservation object to
find the last_write request (if active and from i915) for choosing
display CS flips.
Caveats:
* busy-ioctl: busy-ioctl only reports on the native fences, it will not
warn of stalls (in set-domain-ioctl, pread/pwrite etc) if the object is
being rendered to by external fences. It also will not report the same
busy state as wait-ioctl (or polling on the dma-buf) in the same
circumstances. On the plus side, it does retain reporting of which
*i915* engines are engaged with this object.
* non-blocking atomic modesets take a step backwards as the wait for
render completion blocks the ioctl. This is fixed in a subsequent
patch to use a fence instead for awaiting on the rendering, see
"drm/i915: Restore nonblocking awaits for modesetting"
* dynamic array manipulation for shared-fences in reservation is slower
than the previous lockless static assignment (e.g. gem_exec_lut_handle
runtime on ivb goes from 42s to 66s), mainly due to atomic operations
(maintaining the fence refcounts).
* loss of object-level retirement callbacks, emulated by VMA retirement
tracking.
* minor loss of object-level last activity information from debugfs,
could be replaced with per-vma information if desired
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-21-chris@chris-wilson.co.uk
2016-10-28 20:58:44 +08:00
|
|
|
if (i915_gem_object_is_active(tmp))
|
2015-04-07 23:20:36 +08:00
|
|
|
break;
|
2014-12-12 04:13:08 +08:00
|
|
|
|
drm/i915: Move GEM activity tracking into a common struct reservation_object
In preparation to support many distinct timelines, we need to expand the
activity tracking on the GEM object to handle more than just a request
per engine. We already use the struct reservation_object on the dma-buf
to handle many fence contexts, so integrating that into the GEM object
itself is the preferred solution. (For example, we can now share the same
reservation_object between every consumer/producer using this buffer and
skip the manual import/export via dma-buf.)
v2: Reimplement busy-ioctl (by walking the reservation object), postpone
the ABI change for another day. Similarly use the reservation object to
find the last_write request (if active and from i915) for choosing
display CS flips.
Caveats:
* busy-ioctl: busy-ioctl only reports on the native fences, it will not
warn of stalls (in set-domain-ioctl, pread/pwrite etc) if the object is
being rendered to by external fences. It also will not report the same
busy state as wait-ioctl (or polling on the dma-buf) in the same
circumstances. On the plus side, it does retain reporting of which
*i915* engines are engaged with this object.
* non-blocking atomic modesets take a step backwards as the wait for
render completion blocks the ioctl. This is fixed in a subsequent
patch to use a fence instead for awaiting on the rendering, see
"drm/i915: Restore nonblocking awaits for modesetting"
* dynamic array manipulation for shared-fences in reservation is slower
than the previous lockless static assignment (e.g. gem_exec_lut_handle
runtime on ivb goes from 42s to 66s), mainly due to atomic operations
(maintaining the fence refcounts).
* loss of object-level retirement callbacks, emulated by VMA retirement
tracking.
* minor loss of object-level last activity information from debugfs,
could be replaced with per-vma information if desired
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-21-chris@chris-wilson.co.uk
2016-10-28 20:58:44 +08:00
|
|
|
GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
|
|
|
|
true));
|
|
|
|
|
2015-04-07 23:20:38 +08:00
|
|
|
if (tmp->base.size >= size) {
|
drm/i915: Move GEM activity tracking into a common struct reservation_object
In preparation to support many distinct timelines, we need to expand the
activity tracking on the GEM object to handle more than just a request
per engine. We already use the struct reservation_object on the dma-buf
to handle many fence contexts, so integrating that into the GEM object
itself is the preferred solution. (For example, we can now share the same
reservation_object between every consumer/producer using this buffer and
skip the manual import/export via dma-buf.)
v2: Reimplement busy-ioctl (by walking the reservation object), postpone
the ABI change for another day. Similarly use the reservation object to
find the last_write request (if active and from i915) for choosing
display CS flips.
Caveats:
* busy-ioctl: busy-ioctl only reports on the native fences, it will not
warn of stalls (in set-domain-ioctl, pread/pwrite etc) if the object is
being rendered to by external fences. It also will not report the same
busy state as wait-ioctl (or polling on the dma-buf) in the same
circumstances. On the plus side, it does retain reporting of which
*i915* engines are engaged with this object.
* non-blocking atomic modesets take a step backwards as the wait for
render completion blocks the ioctl. This is fixed in a subsequent
patch to use a fence instead for awaiting on the rendering, see
"drm/i915: Restore nonblocking awaits for modesetting"
* dynamic array manipulation for shared-fences in reservation is slower
than the previous lockless static assignment (e.g. gem_exec_lut_handle
runtime on ivb goes from 42s to 66s), mainly due to atomic operations
(maintaining the fence refcounts).
* loss of object-level retirement callbacks, emulated by VMA retirement
tracking.
* minor loss of object-level last activity information from debugfs,
could be replaced with per-vma information if desired
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-21-chris@chris-wilson.co.uk
2016-10-28 20:58:44 +08:00
|
|
|
/* Clear the set of shared fences early */
|
|
|
|
ww_mutex_lock(&tmp->resv->lock, NULL);
|
|
|
|
reservation_object_add_excl_fence(tmp->resv, NULL);
|
|
|
|
ww_mutex_unlock(&tmp->resv->lock);
|
|
|
|
|
2014-12-12 04:13:08 +08:00
|
|
|
obj = tmp;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-07 23:20:35 +08:00
|
|
|
if (obj == NULL) {
|
2016-10-28 20:58:30 +08:00
|
|
|
obj = i915_gem_object_create_internal(pool->engine->i915, size);
|
2016-04-25 20:32:13 +08:00
|
|
|
if (IS_ERR(obj))
|
|
|
|
return obj;
|
2015-04-07 23:20:35 +08:00
|
|
|
}
|
2014-12-12 04:13:11 +08:00
|
|
|
|
2016-10-28 20:58:35 +08:00
|
|
|
ret = i915_gem_object_pin_pages(obj);
|
2016-10-28 20:58:30 +08:00
|
|
|
if (ret)
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
|
2015-04-07 23:20:38 +08:00
|
|
|
list_move_tail(&obj->batch_pool_link, list);
|
2014-12-12 04:13:08 +08:00
|
|
|
return obj;
|
|
|
|
}
|