drm/i915: Use vblank evade mechanism in mmio_flip

Currently we program just DSPCNTR and DSPSTRIDE directly from the ring
interrupt handler, which is fine since the hardware guarantees that
those are updated atomically. When we have atomic page flips we'll want
to be able to update the offset registers as well, and then we need to
use the vblank evade mechanism to guarantee atomicity. Since that
mechanism introduces a wait, the actual register writes need to be done
from a work item when the flip is triggered by the ring interrupt.
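
Below is a condensed sketch of the resulting flow, distilled from the
diff that follows; it is not a verbatim excerpt (intel_do_mmio_flip()
is inlined, and locking, error handling and the actual plane register
writes are elided):

    /* Ring interrupt path: must not wait, so only kick the work item. */
    void intel_notify_mmio_flip(struct intel_engine_cs *ring)
    {
            /* ... for each crtc whose pending flip waits on this ring ... */
            if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
                    schedule_work(&intel_crtc->mmio_flip.work);
                    mmio_flip->status = INTEL_MMIO_FLIP_WORK_SCHEDULED;
            }
    }

    /* Work item: allowed to sleep, so the vblank evade wait lives here. */
    static void intel_mmio_flip_work_func(struct work_struct *work)
    {
            struct intel_crtc *crtc =
                    container_of(work, struct intel_crtc, mmio_flip.work);
            bool atomic_update;
            u32 start_vbl_count;

            atomic_update = intel_pipe_update_start(crtc, &start_vbl_count);
            /* ... write DSPCNTR/DSPSURF for the new framebuffer ... */
            if (atomic_update)
                    intel_pipe_update_end(crtc, start_vbl_count);
            crtc->mmio_flip.status = INTEL_MMIO_FLIP_IDLE; /* under mmio_flip_lock */
    }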

v2: Explain the need for mmio_flip.work in the commit message (Paulo)
    Initialize the mmio_flip work in intel_crtc_init() (Paulo)
    Prevent new flips before the previous flip work finishes (Paulo)
    Don't acquire modeset locks for mmio flip work

Note: Paulo had reservations about the work item leaking over a plane
disable. But insofar as we lack such checks, that issue is already
present in the existing code.

Signed-off-by: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author:    Ander Conselvan de Oliveira  2014-10-28 15:10:14 +02:00
Committer: Daniel Vetter
parent 0594a3d9c0
commit 9362c7c576
3 changed files with 39 additions and 7 deletions

@@ -9401,11 +9401,15 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
bool atomic_update;
u32 start_vbl_count;
u32 dspcntr;
u32 reg;
intel_mark_page_flip_active(intel_crtc);
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
@@ -9419,6 +9423,21 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
I915_WRITE(DSPSURF(intel_crtc->plane),
intel_crtc->unpin_work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
spin_lock_irq(&dev_priv->mmio_flip_lock);
intel_crtc->mmio_flip.status = INTEL_MMIO_FLIP_IDLE;
spin_unlock_irq(&dev_priv->mmio_flip_lock);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
{
struct intel_crtc *intel_crtc =
container_of(work, struct intel_crtc, mmio_flip.work);
intel_do_mmio_flip(intel_crtc);
}
static int intel_postpone_flip(struct drm_i915_gem_object *obj)
@@ -9461,15 +9480,15 @@ void intel_notify_mmio_flip(struct intel_engine_cs *ring)
struct intel_mmio_flip *mmio_flip;
mmio_flip = &intel_crtc->mmio_flip;
if (mmio_flip->seqno == 0)
if (mmio_flip->status != INTEL_MMIO_FLIP_WAIT_RING)
continue;
if (ring->id != mmio_flip->ring_id)
continue;
if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
intel_do_mmio_flip(intel_crtc);
mmio_flip->seqno = 0;
schedule_work(&intel_crtc->mmio_flip.work);
mmio_flip->status = INTEL_MMIO_FLIP_WORK_SCHEDULED;
ring->irq_put(ring);
}
}
@@ -9487,7 +9506,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
if (WARN_ON(intel_crtc->mmio_flip.seqno))
if (WARN_ON(intel_crtc->mmio_flip.status != INTEL_MMIO_FLIP_IDLE))
return -EBUSY;
ret = intel_postpone_flip(obj);
@@ -9499,6 +9518,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
}
spin_lock_irq(&dev_priv->mmio_flip_lock);
intel_crtc->mmio_flip.status = INTEL_MMIO_FLIP_WAIT_RING;
intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
intel_crtc->mmio_flip.ring_id = obj->ring->id;
spin_unlock_irq(&dev_priv->mmio_flip_lock);
@@ -11983,6 +12003,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

@@ -399,9 +399,17 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
enum intel_mmio_flip_status {
INTEL_MMIO_FLIP_IDLE = 0,
INTEL_MMIO_FLIP_WAIT_RING,
INTEL_MMIO_FLIP_WORK_SCHEDULED,
};
struct intel_mmio_flip {
u32 seqno;
u32 ring_id;
enum intel_mmio_flip_status status;
struct work_struct work;
};
struct intel_crtc {
@@ -1167,7 +1175,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);

@@ -76,7 +76,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
*
* Return: true if the call was successful
*/
static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
@@ -149,7 +149,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
*/
static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;