drm/i915: Export our request as a dma-buf fence on the reservation object
If the GEM objects being rendered within this request have been exported
via dma-buf to a third party, hook ourselves into the dma-buf reservation
object so that the third party can serialise with our rendering via the
dma-buf fences.

Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-26-git-send-email-chris@chris-wilson.co.uk
This commit is contained in:
parent 0eafec6d32
commit ad778f8967
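For context before the diff (not part of the patch): once these request fences are attached to the shared reservation object, an importing driver can serialise against our rendering through the standard reservation API. A minimal sketch, assuming the linux/reservation.h interface of this kernel generation; importer_wait_for_i915() is a hypothetical helper, not an i915 or dma-buf symbol.

#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <linux/sched.h>

/* Hypothetical importer-side helper: block until i915 has finished
 * rendering into the shared buffer. The fences added below by
 * export_fences()/eb_export_fence() live in dma_buf->resv, so a
 * plain reservation wait suffices.
 */
static long importer_wait_for_i915(struct dma_buf *dma_buf)
{
	/* wait_all=true waits for the exclusive (write) fence and all
	 * shared (read) fences; intr=true allows signal interruption.
	 * Returns <0 on error, >0 on success (jiffies remaining).
	 */
	return reservation_object_wait_timeout_rcu(dma_buf->resv,
						   true, true,
						   MAX_SCHEDULE_TIMEOUT);
}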
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
  * Authors:
  *	Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+			  struct dma_buf *dma_buf)
+{
+	struct reservation_object *resv = dma_buf->resv;
+	struct drm_i915_gem_request *req;
+	unsigned long active;
+	int idx;
+
+	active = __I915_BO_ACTIVE(obj);
+	if (!active)
+		return;
+
+	/* Serialise with execbuf to prevent concurrent fence-loops */
+	mutex_lock(&obj->base.dev->struct_mutex);
+
+	/* Mark the object for future fences before racily adding old fences */
+	obj->base.dma_buf = dma_buf;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	for_each_active(active, idx) {
+		req = i915_gem_active_get(&obj->last_read[idx],
+					  &obj->base.dev->struct_mutex);
+		if (!req)
+			continue;
+
+		if (reservation_object_reserve_shared(resv) == 0)
+			reservation_object_add_shared_fence(resv, &req->fence);
+
+		i915_gem_request_put(req);
+	}
+
+	req = i915_gem_active_get(&obj->last_write,
+				  &obj->base.dev->struct_mutex);
+	if (req) {
+		reservation_object_add_excl_fence(resv, &req->fence);
+		i915_gem_request_put(req);
+	}
+
+	ww_mutex_unlock(&resv->lock);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
 
-
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(&exp_info);
+	dma_buf = dma_buf_export(&exp_info);
+	if (IS_ERR(dma_buf))
+		return dma_buf;
+
+	export_fences(obj, dma_buf);
+	return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,14 +26,18 @@
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -1205,6 +1209,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 		list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	ww_mutex_lock(&resv->lock, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	ww_mutex_unlock(&resv->lock);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
@@ -1224,6 +1250,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 
 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
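The userspace-visible effect, in the spirit of the Testcase above (igt/prime_busy): with the fences exported, a poll() on the PRIME file descriptor now reports whether the GPU is still using the buffer. Illustrative sketch only, not igt's actual code; prime_is_busy() is a made-up name.

#include <poll.h>
#include <stdbool.h>

/* Busy-query an exported dma-buf fd. Per dma-buf poll semantics,
 * POLLIN waits on the exclusive (write) fence only, while POLLOUT
 * waits on all fences, shared and exclusive. A zero timeout turns
 * the poll into a non-blocking query.
 */
static bool prime_is_busy(int dmabuf_fd, bool for_write)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = for_write ? POLLOUT : POLLIN,
	};

	return poll(&pfd, 1, 0) == 0; /* no event ready => still busy */
}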