drm/i915: open code gen6+ ring irqs
We can now open-code the get/put irq functions, as they were just abstracting single register definitions. It would be nice to merge this in with the IRQ handling code, but that is too much work for me at present. In addition, I could probably collapse this into a lot of the Ironlake stuff, but I don't think it's worth the potential regressions.

This patch itself should not affect functionality.

CC: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Ben Widawsky <benjamin.widawsky@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent e2a1e2f024
commit 25c063004a
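Before the diff itself, here is a minimal, self-contained sketch of the refactoring pattern the patch applies: the per-ring get/put wrappers, which only passed a constant interrupt mask down to gen6_ring_get_irq()/gen6_ring_put_irq(), are dropped in favour of a new per-ring irq_enable field that the shared functions read themselves. Everything below is illustrative plain C, not the i915 code; the struct ring type, shared_irq_get()/shared_irq_put(), the printf stand-ins for the interrupt-mask register writes, and the interrupt bit value are all invented for the example (the real functions also take ring->irq_lock and check dev->irq_enabled, as the hunks below show).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for intel_ring_buffer: just the fields the example needs. */
struct ring {
        const char *name;
        uint32_t irq_enable;    /* per-ring user-interrupt bit, set at init */
        uint32_t irq_refcount;  /* how many waiters currently need the IRQ  */
        bool (*irq_get)(struct ring *ring);
        void (*irq_put)(struct ring *ring);
};

/* One shared implementation replaces the per-ring get_irq wrappers:
 * the mask is read from the ring instead of being passed as an argument. */
static bool shared_irq_get(struct ring *ring)
{
        uint32_t mask = ring->irq_enable;

        if (ring->irq_refcount++ == 0)
                printf("%s: unmask bit 0x%08x\n", ring->name, (unsigned)mask);
        return true;
}

static void shared_irq_put(struct ring *ring)
{
        uint32_t mask = ring->irq_enable;

        if (--ring->irq_refcount == 0)
                printf("%s: mask bit 0x%08x\n", ring->name, (unsigned)mask);
}

int main(void)
{
        /* Each ring supplies only its interrupt bit; get/put are shared. */
        struct ring bsd = {
                .name       = "bsd",
                .irq_enable = 1u << 12,   /* placeholder, not the real bit */
                .irq_get    = shared_irq_get,
                .irq_put    = shared_irq_put,
        };

        bsd.irq_get(&bsd);
        bsd.irq_put(&bsd);
        return 0;
}

Storing the mask in .irq_enable once at initialisation is what lets the three wrapper pairs (render/bsd/blt) collapse into a single implementation, which is exactly the shape of the changes to gen6_bsd_ring, gen6_blt_ring, intel_init_render_ring_buffer() and intel_render_ring_init_dri() in the diff below.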
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -788,10 +788,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        u32 mask = ring->irq_enable;
 
         if (!dev->irq_enabled)
                 return false;
@@ -813,10 +814,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 mask)
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 mask)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
         struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        u32 mask = ring->irq_enable;
 
         spin_lock(&ring->irq_lock);
         if (--ring->irq_refcount == 0) {
@@ -1373,30 +1375,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
         return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_get_irq(ring, GT_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_put_irq(ring, GT_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_get_irq(ring, GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_put_irq(ring, GEN6_BSD_USER_INTERRUPT);
-}
-
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
         .name = "gen6 bsd ring",
@@ -1408,8 +1386,9 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
         .flush = gen6_ring_flush,
         .add_request = gen6_add_request,
         .get_seqno = gen6_ring_get_seqno,
-        .irq_get = gen6_bsd_ring_get_irq,
-        .irq_put = gen6_bsd_ring_put_irq,
+        .irq_enable = GEN6_BSD_USER_INTERRUPT,
+        .irq_get = gen6_ring_get_irq,
+        .irq_put = gen6_ring_put_irq,
         .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
         .sync_to = gen6_bsd_ring_sync_to,
         .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
@@ -1420,18 +1399,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-        return gen6_ring_get_irq(ring, GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-        gen6_ring_put_irq(ring, GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
 {
@@ -1463,8 +1430,9 @@ static const struct intel_ring_buffer gen6_blt_ring = {
         .flush = blt_ring_flush,
         .add_request = gen6_add_request,
         .get_seqno = gen6_ring_get_seqno,
-        .irq_get = blt_ring_get_irq,
-        .irq_put = blt_ring_put_irq,
+        .irq_get = gen6_ring_get_irq,
+        .irq_put = gen6_ring_put_irq,
+        .irq_enable = GEN6_BLITTER_USER_INTERRUPT,
         .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
         .sync_to = gen6_blt_ring_sync_to,
         .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
@@ -1482,8 +1450,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
         if (INTEL_INFO(dev)->gen >= 6) {
                 ring->add_request = gen6_add_request;
                 ring->flush = gen6_render_ring_flush;
-                ring->irq_get = gen6_render_ring_get_irq;
-                ring->irq_put = gen6_render_ring_put_irq;
+                ring->irq_get = gen6_ring_get_irq;
+                ring->irq_put = gen6_ring_put_irq;
+                ring->irq_enable = GT_USER_INTERRUPT;
                 ring->get_seqno = gen6_ring_get_seqno;
         } else if (IS_GEN5(dev)) {
                 ring->add_request = pc_render_add_request;
@@ -1506,8 +1475,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
         *ring = render_ring;
         if (INTEL_INFO(dev)->gen >= 6) {
                 ring->add_request = gen6_add_request;
-                ring->irq_get = gen6_render_ring_get_irq;
-                ring->irq_put = gen6_render_ring_put_irq;
+                ring->irq_get = gen6_ring_get_irq;
+                ring->irq_put = gen6_ring_put_irq;
+                ring->irq_enable = GT_USER_INTERRUPT;
         } else if (IS_GEN5(dev)) {
                 ring->add_request = pc_render_add_request;
                 ring->get_seqno = pc_render_get_seqno;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -59,6 +59,7 @@ struct intel_ring_buffer {
         spinlock_t irq_lock;
         u32 irq_refcount;
         u32 irq_mask;
+        u32 irq_enable;         /* IRQs enabled for this ring */
         u32 irq_seqno;          /* last seq seem at irq time */
         u32 trace_irq_seqno;
         u32 waiting_seqno;