drm/i915: Remove superfluous i915_add_request_no_flush() helper
The only time we need to emit a flush inside request emission is after an execbuffer, for which we can use the full __i915_add_request(). All other instances want the simpler i915_add_request() without flushing, so remove the useless helper.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170317114709.8388-1-chris@chris-wilson.co.uk
commit e642c85b03
parent e3b1895fc1
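For orientation before the hunks below, here is a minimal standalone C sketch of what the header change amounts to. This is an illustrative reconstruction, not the kernel header verbatim: the struct is stubbed out, and the comments paraphrase the commit message.

#include <stdbool.h>

struct drm_i915_gem_request;	/* stub; the real struct lives in i915_gem_request.h */

/* Closes a request; flush_caches selects whether a cache flush is
 * emitted as part of request emission.
 */
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);

/* Before this patch: two thin wrappers over the same function. */
#define i915_add_request(req) \
	__i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, false)

/* After this patch: the _no_flush helper is gone and i915_add_request()
 * itself no longer flushes; execbuffer, the one path that needs the
 * flush, calls __i915_add_request(req, true) directly.
 */
#undef i915_add_request
#undef i915_add_request_no_flush
#define i915_add_request(req) \
	__i915_add_request(req, false)

In other words, a caller that previously used i915_add_request_no_flush(req) now simply calls i915_add_request(req) and gets the same non-flushing behaviour.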
@@ -212,7 +212,7 @@ out:
 	workload->status = ret;
 
 	if (!IS_ERR_OR_NULL(rq))
-		i915_add_request_no_flush(rq);
+		i915_add_request(rq);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }
@@ -933,7 +933,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		}
 
 		ret = i915_switch_context(req);
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		if (ret)
 			return ret;
 	}
@@ -267,8 +267,6 @@ int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
-	__i915_add_request(req, true)
-#define i915_add_request_no_flush(req) \
 	__i915_add_request(req, false)
 
 void __i915_gem_request_submit(struct drm_i915_gem_request *request);
@@ -10668,7 +10668,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		intel_mark_page_flip_active(intel_crtc, work);
 
 		work->flip_queued_req = i915_gem_request_get(request);
-		i915_add_request_no_flush(request);
+		i915_add_request(request);
 	}
 
 	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -10684,7 +10684,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_request:
-	i915_add_request_no_flush(request);
+	i915_add_request(request);
 cleanup_unpin:
 	to_intel_plane_state(primary->state)->vma = work->old_vma;
 	intel_unpin_fb_vma(vma);
@@ -278,7 +278,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 4);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -343,7 +343,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	cs = intel_ring_begin(req, 2);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -419,7 +419,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 6);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -477,7 +477,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 2);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -7086,7 +7086,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 		rcs->init_context(req);
 
 	/* Mark the device busy, calling intel_enable_gt_powersave() */
-	i915_add_request_no_flush(req);
+	i915_add_request(req);
 
 unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);