drm/i915/overlay: Switch to using i915_active tracking
Remove the raw i915_active_request tracking in favour of the higher level i915_active tracking for the sole purpose of making the lockless transition easier in later patches.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190812174804.26180-2-chris@chris-wilson.co.uk
parent 3d6792cf0a
commit a21ce8ad12
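For readers less familiar with the i915_active API, the sketch below distils the lifecycle this patch moves the overlay onto: the tracker is initialised once with a retirement callback, every emitted request is added to the tracker, and callers synchronise by waiting on the tracker instead of retiring a single stored request. This is only an illustration assembled from the calls visible in the diff (i915_active_init(), i915_active_ref(), i915_active_wait(), i915_active_fini()); the example_* names are hypothetical and the snippet is not buildable outside the i915 tree of this era.

/* Sketch only: mirrors the i915_active calls used by this patch. */
static void example_last_flip_retire(struct i915_active *active)
{
	struct intel_overlay *overlay =
		container_of(active, typeof(*overlay), last_flip);

	/* called once the final tracked request has retired */
	if (overlay->flip_complete)
		overlay->flip_complete(overlay);
}

static void example_setup(struct intel_overlay *overlay)
{
	/* one-time: bind the tracker to its retirement hook */
	i915_active_init(overlay->i915, &overlay->last_flip,
			 NULL, example_last_flip_retire);
}

static int example_emit_and_wait(struct intel_overlay *overlay,
				 void (*fn)(struct intel_overlay *))
{
	struct i915_request *rq;
	int err;

	overlay->flip_complete = fn;

	rq = i915_request_create(overlay->context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* track the request on last_flip before emitting commands */
	err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
	if (err) {
		i915_request_add(rq);
		return err;
	}

	/* the real paths emit MI_* commands into rq at this point */

	i915_request_add(rq);

	/* block until everything tracked by last_flip has completed */
	return i915_active_wait(&overlay->last_flip);
}

static void example_teardown(struct intel_overlay *overlay)
{
	i915_active_fini(&overlay->last_flip);
}

The design point to notice is that the retirement hook is now fixed at init time; per-operation behaviour is selected by stashing a flip_complete pointer on the overlay, which is exactly what the reworked alloc_request() in the diff below does.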
@@ -191,7 +191,8 @@ struct intel_overlay {
 	struct overlay_registers __iomem *regs;
 	u32 flip_addr;
 	/* flip handling */
-	struct i915_active_request last_flip;
+	struct i915_active last_flip;
+	void (*flip_complete)(struct intel_overlay *ovl);
 };
 
 static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -217,30 +218,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
 				  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
-					 struct i915_request *rq,
-					 i915_active_retire_fn retire)
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
 {
-	GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
-					    &overlay->i915->drm.struct_mutex));
-	i915_active_request_set_retire_fn(&overlay->last_flip, retire,
-					  &overlay->i915->drm.struct_mutex);
-	__i915_active_request_set(&overlay->last_flip, rq);
-	i915_request_add(rq);
-}
+	struct i915_request *rq;
+	int err;
 
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
-					 struct i915_request *rq,
-					 i915_active_retire_fn retire)
-{
-	intel_overlay_submit_request(overlay, rq, retire);
-	return i915_active_request_retire(&overlay->last_flip,
-					  &overlay->i915->drm.struct_mutex);
-}
+	overlay->flip_complete = fn;
 
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
-	return i915_request_create(overlay->context);
+	rq = i915_request_create(overlay->context);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
+	if (err) {
+		i915_request_add(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
 }
 
 /* overlay needs to be disable in OCMD reg */
@@ -252,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	WARN_ON(overlay->active);
 
-	rq = alloc_request(overlay);
+	rq = alloc_request(overlay, NULL);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -273,7 +269,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	*cs++ = MI_NOOP;
 	intel_ring_advance(rq, cs);
 
-	return intel_overlay_do_wait_request(overlay, rq, NULL);
+	i915_request_add(rq);
+
+	return i915_active_wait(&overlay->last_flip);
 }
 
 static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -317,7 +315,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	rq = alloc_request(overlay);
+	rq = alloc_request(overlay, NULL);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -332,8 +330,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	intel_ring_advance(rq, cs);
 
 	intel_overlay_flip_prepare(overlay, vma);
-
-	intel_overlay_submit_request(overlay, rq, NULL);
+	i915_request_add(rq);
 
 	return 0;
 }
@@ -354,20 +351,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
 }
 
 static void
-intel_overlay_release_old_vid_tail(struct i915_active_request *active,
-				   struct i915_request *rq)
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct intel_overlay *overlay =
-		container_of(active, typeof(*overlay), last_flip);
-
 	intel_overlay_release_old_vma(overlay);
 }
 
-static void intel_overlay_off_tail(struct i915_active_request *active,
-				   struct i915_request *rq)
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct intel_overlay *overlay =
-		container_of(active, typeof(*overlay), last_flip);
 	struct drm_i915_private *dev_priv = overlay->i915;
 
 	intel_overlay_release_old_vma(overlay);
@@ -380,6 +370,16 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
 		i830_overlay_clock_gating(dev_priv, true);
 }
 
+static void
+intel_overlay_last_flip_retire(struct i915_active *active)
+{
+	struct intel_overlay *overlay =
+		container_of(active, typeof(*overlay), last_flip);
+
+	if (overlay->flip_complete)
+		overlay->flip_complete(overlay);
+}
+
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
@@ -394,7 +394,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	rq = alloc_request(overlay);
+	rq = alloc_request(overlay, intel_overlay_off_tail);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -417,17 +417,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	intel_ring_advance(rq, cs);
 
 	intel_overlay_flip_prepare(overlay, NULL);
+	i915_request_add(rq);
 
-	return intel_overlay_do_wait_request(overlay, rq,
-					     intel_overlay_off_tail);
+	return i915_active_wait(&overlay->last_flip);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-	return i915_active_request_retire(&overlay->last_flip,
-					  &overlay->i915->drm.struct_mutex);
+	return i915_active_wait(&overlay->last_flip);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -437,43 +436,40 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	struct i915_request *rq;
 	u32 *cs;
-	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	/* Only wait if there is actually an old frame to release to
+	/*
+	 * Only wait if there is actually an old frame to release to
 	 * guarantee forward progress.
 	 */
 	if (!overlay->old_vma)
 		return 0;
 
-	if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
-		/* synchronous slowpath */
-		struct i915_request *rq;
+	if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+		intel_overlay_release_old_vid_tail(overlay);
+		return 0;
+	}
 
-		rq = alloc_request(overlay);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+	rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
 
-		cs = intel_ring_begin(rq, 2);
-		if (IS_ERR(cs)) {
-			i915_request_add(rq);
-			return PTR_ERR(cs);
-		}
+	cs = intel_ring_begin(rq, 2);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
 
-		*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-		*cs++ = MI_NOOP;
-		intel_ring_advance(rq, cs);
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(rq, cs);
 
-		ret = intel_overlay_do_wait_request(overlay, rq,
-						    intel_overlay_release_old_vid_tail);
-		if (ret)
-			return ret;
-	} else
-		intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+	i915_request_add(rq);
 
-	return 0;
+	return i915_active_wait(&overlay->last_flip);
 }
 
 void intel_overlay_reset(struct drm_i915_private *dev_priv)
@@ -1375,7 +1371,9 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 	overlay->contrast = 75;
 	overlay->saturation = 146;
 
-	INIT_ACTIVE_REQUEST(&overlay->last_flip);
+	i915_active_init(dev_priv,
+			 &overlay->last_flip,
+			 NULL, intel_overlay_last_flip_retire);
 
 	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
 	if (ret)
@@ -1409,6 +1407,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
 	WARN_ON(overlay->active);
 
 	i915_gem_object_put(overlay->reg_bo);
+	i915_active_fini(&overlay->last_flip);
 
 	kfree(overlay);
 }
@@ -89,25 +89,6 @@ int __must_check
 i915_active_request_set(struct i915_active_request *active,
 			struct i915_request *rq);
 
-/**
- * i915_active_request_set_retire_fn - updates the retirement callback
- * @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
- *
- * i915_active_request_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
- */
-static inline void
-i915_active_request_set_retire_fn(struct i915_active_request *active,
-				  i915_active_retire_fn fn,
-				  struct mutex *mutex)
-{
-	lockdep_assert_held(mutex);
-	active->retire = fn ?: i915_active_retire_noop;
-}
-
 /**
  * i915_active_request_raw - return the active request
  * @active - the active tracker
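With the overlay converted, the patch also drops i915_active_request_set_retire_fn() outright, which implies this was its last remaining caller. As a rough contrast, the two patterns look like the fragments below, lifted from the hunks in this commit and shown back to back purely for illustration (not self-contained code):

/* before: retire hook swapped per submission, under struct_mutex */
i915_active_request_set_retire_fn(&overlay->last_flip, retire,
				  &overlay->i915->drm.struct_mutex);
__i915_active_request_set(&overlay->last_flip, rq);
i915_request_add(rq);

/* after: hook fixed once at init, requests simply added to the tracker */
i915_active_init(dev_priv, &overlay->last_flip,
		 NULL, intel_overlay_last_flip_retire);
err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
i915_request_add(rq);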