drm/i915: Use a common seqno for all rings.

This will be used by the eviction logic to maintain fairness between the
rings.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>

Author:    Chris Wilson <chris@chris-wilson.co.uk>
Date:      2010-08-07 11:01:22 +01:00
Committer: Eric Anholt <eric@anholt.net>
Parent:    0108a3edd5
Commit:    6f392d5486

4 changed files with 29 additions and 23 deletions
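
For context, a minimal user-space sketch (illustration only, not part of
this patch) of what a single driver-wide seqno buys: requests emitted by
different rings now draw from one monotonic counter, so any two of them
can be ordered with the driver's usual wrap-safe comparison. The
i915_seqno_passed() below mirrors the existing helper of that name in
i915_gem.c; oldest_request() is a hypothetical example, not driver code.

	#include <stdint.h>
	#include <stdbool.h>

	/* True if seq1 is at or after seq2; the signed difference
	 * handles u32 wrap-around. */
	static bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	/* Hypothetical: pick the older of two requests, one per ring.
	 * With per-ring counters this comparison would be meaningless;
	 * with a common counter it is exactly the ordering the eviction
	 * logic needs to treat the rings fairly. */
	static uint32_t oldest_request(uint32_t render_seqno, uint32_t bsd_seqno)
	{
		return i915_seqno_passed(render_seqno, bsd_seqno) ?
			bsd_seqno : render_seqno;
	}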

drivers/gpu/drm/i915/i915_drv.h

@@ -244,6 +244,7 @@ typedef struct drm_i915_private {
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer bsd_ring;
+	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	void *seqno_page;
@@ -573,8 +574,6 @@ typedef struct drm_i915_private {
 	 */
 	struct delayed_work retire_work;
 
-	uint32_t next_gem_seqno;
-
 	/**
 	 * Waiting sequence number, if any
 	 */

drivers/gpu/drm/i915/i915_gem.c

@@ -4714,6 +4714,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 		goto cleanup_render_ring;
 	}
 
+	dev_priv->next_seqno = 1;
+
 	return 0;
 
 cleanup_render_ring:
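
The counter starts at 1 because 0 is reserved to mean "no seqno"; when
the u32 wraps, the allocator skips 0 as well (see i915_gem_get_seqno()
in intel_ringbuffer.c below). A minimal stand-alone sketch of that wrap
behaviour (plain user-space C, illustration only, not driver code):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t next_seqno = 1;	/* matches the init done above */

	static uint32_t get_seqno(void)
	{
		uint32_t seqno = next_seqno;

		/* reserve 0 for non-seqno */
		if (++next_seqno == 0)
			next_seqno = 1;

		return seqno;
	}

	int main(void)
	{
		next_seqno = 0xffffffffu;	/* force a wrap */
		assert(get_seqno() == 0xffffffffu);
		assert(get_seqno() == 1);	/* 0 is skipped */
		return 0;
	}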

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -33,18 +33,35 @@
 #include "i915_drm.h"
 #include "i915_trace.h"
 
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 seqno;
+
+	seqno = dev_priv->next_seqno;
+
+	/* reserve 0 for non-seqno */
+	if (++dev_priv->next_seqno == 0)
+		dev_priv->next_seqno = 1;
+
+	return seqno;
+}
+
 static void
 render_ring_flush(struct drm_device *dev,
 		struct intel_ring_buffer *ring,
 		u32	invalidate_domains,
 		u32	flush_domains)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 cmd;
+
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 		  invalidate_domains, flush_domains);
 #endif
-	u32 cmd;
-	trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
 				     invalidate_domains, flush_domains);
 
 	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -233,9 +250,10 @@ render_ring_add_request(struct drm_device *dev,
 		struct drm_file *file_priv,
 		u32 flush_domains)
 {
-	u32 seqno;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	seqno = intel_ring_get_seqno(dev, ring);
+	u32 seqno;
+
+	seqno = i915_gem_get_seqno(dev);
 
 	if (IS_GEN6(dev)) {
 		BEGIN_LP_RING(6);
@@ -405,7 +423,9 @@ bsd_ring_add_request(struct drm_device *dev,
 		u32 flush_domains)
 {
 	u32 seqno;
-	seqno = intel_ring_get_seqno(dev, ring);
+
+	seqno = i915_gem_get_seqno(dev);
+
 	intel_ring_begin(dev, ring, 4);
 	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(dev, ring,
@@ -479,7 +499,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
 	count = nbox ? nbox : 1;
 
@@ -757,18 +777,6 @@ void intel_fill_struct(struct drm_device *dev,
 	intel_ring_advance(dev, ring);
 }
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	u32 seqno;
-	seqno = ring->next_seqno;
-
-	/* reserve 0 for non-seqno */
-	if (++ring->next_seqno == 0)
-		ring->next_seqno = 1;
-	return seqno;
-}
-
 struct intel_ring_buffer render_ring = {
 	.name = "render ring",
 	.regs = {
@@ -786,7 +794,6 @@ struct intel_ring_buffer render_ring = {
 	.head = 0,
 	.tail = 0,
 	.space = 0,
-	.next_seqno = 1,
 	.user_irq_refcount = 0,
 	.irq_gem_seqno = 0,
 	.waiting_gem_seqno = 0,
@@ -825,7 +832,6 @@ struct intel_ring_buffer bsd_ring = {
 	.head = 0,
 	.tail = 0,
 	.space = 0,
-	.next_seqno = 1,
 	.user_irq_refcount = 0,
 	.irq_gem_seqno = 0,
 	.waiting_gem_seqno = 0,

drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -26,7 +26,6 @@ struct intel_ring_buffer {
 	unsigned int head;
 	unsigned int tail;
 	unsigned int space;
-	u32 next_seqno;
 	struct intel_hw_status_page status_page;
 
 	u32 irq_gem_seqno; /* last seq seem at irq time */