diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 610c3307a02a..f6ce811a024a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -891,10 +891,10 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -904,7 +904,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring);
 			if (ret)
 				return ret;
 		}
@@ -916,7 +916,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req->ring);
 }
 
 static bool
@@ -1246,7 +1246,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d94c01547304..aa12a595a309 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -624,12 +624,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -639,7 +637,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring);
 			if (ret)
 				return ret;
 		}
@@ -656,7 +654,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -918,7 +916,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;
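
For reference, a minimal standalone sketch of the pattern this patch applies (illustrative types and names only, not the driver's real structures): helpers that previously took the engine, ringbuffer and context as separate parameters now take only the request, and reach the other objects through it.

#include <stdio.h>

struct engine  { const char *name; };
struct ringbuf { int tail; };
struct context { int id; };

/*
 * Stand-in for drm_i915_gem_request: the engine, ringbuffer and context
 * are all reachable through the request itself.
 */
struct request {
	struct engine  *ring;
	struct ringbuf *ringbuf;
	struct context *ctx;
};

/* Old shape: the engine (and, for execlists, ringbuf/ctx) passed explicitly. */
static int move_to_gpu_old(struct engine *ring)
{
	printf("sync against engine %s\n", ring->name);
	return 0;
}

/* New shape: only the request is passed; everything else hangs off it. */
static int move_to_gpu_new(struct request *req)
{
	printf("sync against engine %s, ctx %d\n", req->ring->name, req->ctx->id);
	return 0;
}

int main(void)
{
	struct engine  rcs = { "rcs" };
	struct ringbuf rb  = { 0 };
	struct context ctx = { 1 };
	struct request req = { &rcs, &rb, &ctx };

	move_to_gpu_old(req.ring);    /* call style before the patch */
	return move_to_gpu_new(&req); /* call style after the patch */
}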