drm/i915: inline enable/disable_irq into ring->get/put_irq

Now that these are properly refactored, this additional indirection
doesn't really buy us anything but confusion. Hence inline them.

This duplicates the ironlake gt enable/disable code snippets, but we've
already separated ilk from gen6+ gt irq handling in i915_irq.c, so I think
this makes more sense.

Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author: Daniel Vetter
Date:   2012-04-11 22:12:59 +02:00
parent 28f0cbf71f
commit f637fde434
1 changed file with 26 additions and 42 deletions
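The change is mechanical: each ring irq callback now updates the interrupt mask register directly instead of calling a helper. As a rough sketch of the resulting gen5 variant (the struct drm_device lookup and the dev->irq_enabled check are assumed from the surrounding code; the gt_irq_mask bookkeeping and GTIMR writes are taken straight from the diff below):

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                /* first reference: unmask this ring's bit in the GT interrupt mask */
                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

gen5_ring_put_irq mirrors this, setting the bit again when the refcount drops to zero; the i9xx variants use IMR instead of GTIMR, and gen6 additionally programs the per-ring mask via I915_WRITE_IMR before touching GTIMR.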


@@ -591,38 +591,6 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
         return pc->cpu_page[0];
 }
 
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-        dev_priv->gt_irq_mask &= ~mask;
-        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-        POSTING_READ(GTIMR);
-}
-
-static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-        dev_priv->gt_irq_mask |= mask;
-        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-        POSTING_READ(GTIMR);
-}
-
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-        dev_priv->irq_mask &= ~mask;
-        I915_WRITE(IMR, dev_priv->irq_mask);
-        POSTING_READ(IMR);
-}
-
-static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-        dev_priv->irq_mask |= mask;
-        I915_WRITE(IMR, dev_priv->irq_mask);
-        POSTING_READ(IMR);
-}
-
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -633,8 +601,11 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock(&ring->irq_lock);
-        if (ring->irq_refcount++ == 0)
-                ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
+        if (ring->irq_refcount++ == 0) {
+                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
+        }
         spin_unlock(&ring->irq_lock);
 
         return true;
@@ -647,8 +618,11 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
         drm_i915_private_t *dev_priv = dev->dev_private;
 
         spin_lock(&ring->irq_lock);
-        if (--ring->irq_refcount == 0)
-                ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
+        if (--ring->irq_refcount == 0) {
+                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
+        }
         spin_unlock(&ring->irq_lock);
 }
@@ -662,8 +636,11 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
                 return false;
 
         spin_lock(&ring->irq_lock);
-        if (ring->irq_refcount++ == 0)
-                i915_enable_irq(dev_priv, ring->irq_enable_mask);
+        if (ring->irq_refcount++ == 0) {
+                dev_priv->irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE(IMR, dev_priv->irq_mask);
+                POSTING_READ(IMR);
+        }
         spin_unlock(&ring->irq_lock);
 
         return true;
@@ -676,8 +653,11 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
         drm_i915_private_t *dev_priv = dev->dev_private;
 
         spin_lock(&ring->irq_lock);
-        if (--ring->irq_refcount == 0)
-                i915_disable_irq(dev_priv, ring->irq_enable_mask);
+        if (--ring->irq_refcount == 0) {
+                dev_priv->irq_mask |= ring->irq_enable_mask;
+                I915_WRITE(IMR, dev_priv->irq_mask);
+                POSTING_READ(IMR);
+        }
         spin_unlock(&ring->irq_lock);
 }
@@ -769,7 +749,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
         spin_lock(&ring->irq_lock);
         if (ring->irq_refcount++ == 0) {
                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-                ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
+                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
         }
         spin_unlock(&ring->irq_lock);
@@ -785,7 +767,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
         spin_lock(&ring->irq_lock);
         if (--ring->irq_refcount == 0) {
                 I915_WRITE_IMR(ring, ~0);
-                ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
+                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+                POSTING_READ(GTIMR);
         }
         spin_unlock(&ring->irq_lock);