drm/i915: Rename 'flags' to 'dispatch_flags' for better code reading

There is a flags word that is passed through the execbuffer code path all the
way from initial decoding of the user parameters down to the very final dispatch
buffer call. It is simply called 'flags'. Unfortunately, there are many other
flags words floating around in the same blocks of code, and there will be even
more once the GPU scheduler arrives.

This patch makes it more obvious exactly which flags word is which by renaming
'flags' to 'dispatch_flags'. Note that the bit definitions for this flags word
already have an 'I915_DISPATCH_' prefix on them and so are not quite so
ambiguous.
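
To illustrate, here is a condensed sketch (lifted from the i915_gem_do_execbuffer()
hunk below and trimmed for brevity, e.g. the CAP_SYS_ADMIN check is dropped) of the
two words side by side: the ioctl's user-supplied 'args->flags' word and the newly
named dispatch flags word:

	u32 dispatch_flags = 0;

	/* translate user execbuffer flags into dispatch flags */
	if (args->flags & I915_EXEC_SECURE)
		dispatch_flags |= I915_DISPATCH_SECURE;
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

When both words were simply called 'flags', lines like these were easy to misread;
after the rename it is obvious which word each test and assignment touches.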

OTC-Jira: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
[danvet: Resolve conflict with Chris' rework of the bb parsing.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 8e004efc16 (parent 06dc68d680)
Author: John Harrison, 2015-02-13 11:48:10 +00:00
Committer: Daniel Vetter
5 files changed, 41 insertions(+), 35 deletions(-)

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1127,7 +1127,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas,
 			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 flags)
+			       u64 exec_start, u32 dispatch_flags)
 {
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1255,19 +1255,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			ret = ring->dispatch_execbuffer(ring,
 							exec_start, exec_len,
-							flags);
+							dispatch_flags);
 			if (ret)
 				goto error;
 		}
 	} else {
 		ret = ring->dispatch_execbuffer(ring,
 						exec_start, exec_len,
-						flags);
+						dispatch_flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1342,7 +1342,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u64 exec_start = args->batch_start_offset;
-	u32 flags;
+	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
@@ -1353,15 +1353,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	flags = 0;
+	dispatch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
 		if (!file->is_master || !capable(CAP_SYS_ADMIN))
 			return -EPERM;
 
-		flags |= I915_DISPATCH_SECURE;
+		dispatch_flags |= I915_DISPATCH_SECURE;
 	}
 	if (args->flags & I915_EXEC_IS_PINNED)
-		flags |= I915_DISPATCH_PINNED;
+		dispatch_flags |= I915_DISPATCH_PINNED;
 
 	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1501,7 +1501,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 * this check when that is fixed.
 		 */
 		if (USES_FULL_PPGTT(dev))
-			flags |= I915_DISPATCH_SECURE;
+			dispatch_flags |= I915_DISPATCH_SECURE;
 
 		exec_start = 0;
 	}
@@ -1511,7 +1511,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
-	if (flags & I915_DISPATCH_SECURE) {
+	if (dispatch_flags & I915_DISPATCH_SECURE) {
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
 		 * outside of the reservation loop. But:
@@ -1531,7 +1531,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
 	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
-				      &eb->vmas, batch_obj, exec_start, flags);
+				      &eb->vmas, batch_obj, exec_start,
+				      dispatch_flags);
 
 	/*
 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1539,7 +1540,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * needs to be adjusted to also track the ggtt batch vma properly as
 	 * active.
 	 */
-	if (flags & I915_DISPATCH_SECURE)
+	if (dispatch_flags & I915_DISPATCH_SECURE)
 		i915_gem_object_ggtt_unpin(batch_obj);
 err:
 	/* the request owns the ref now */

drivers/gpu/drm/i915/intel_lrc.c

@@ -620,7 +620,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
  * @vmas: list of vmas.
  * @batch_obj: the batchbuffer to submit.
  * @exec_start: batchbuffer start virtual address pointer.
- * @flags: translated execbuffer call flags.
+ * @dispatch_flags: translated execbuffer call flags.
  *
  * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
  * away the submission details of the execbuffer ioctl call.
@@ -633,7 +633,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas,
 			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 flags)
+			       u64 exec_start, u32 dispatch_flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -706,7 +706,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
+	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
 	if (ret)
 		return ret;
@@ -1163,9 +1163,9 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
 			      struct intel_context *ctx,
-			      u64 offset, unsigned flags)
+			      u64 offset, unsigned dispatch_flags)
 {
-	bool ppgtt = !(flags & I915_DISPATCH_SECURE);
+	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
 	ret = intel_logical_ring_begin(ringbuf, ctx, 4);

drivers/gpu/drm/i915/intel_lrc.h

@@ -84,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas,
 			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 flags);
+			       u64 exec_start, u32 dispatch_flags);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1741,7 +1741,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 static int
 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 			 u64 offset, u32 length,
-			 unsigned flags)
+			 unsigned dispatch_flags)
 {
 	int ret;
@@ -1752,7 +1752,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
-			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+			(dispatch_flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_NON_SECURE_I965));
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -1765,8 +1766,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
-			 u64 offset, u32 len,
-			 unsigned flags)
+				u64 offset, u32 len,
+				unsigned dispatch_flags)
 {
 	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
@@ -1784,7 +1785,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	if ((flags & I915_DISPATCH_PINNED) == 0) {
+	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1816,7 +1817,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
 	intel_ring_emit(ring, offset + len - 8);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
@@ -1827,7 +1829,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
 			 u64 offset, u32 len,
-			 unsigned flags)
+			 unsigned dispatch_flags)
 {
 	int ret;
@@ -1836,7 +1838,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
 	intel_ring_advance(ring);
 
 	return 0;
@@ -2395,9 +2398,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 static int
 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 			      u64 offset, u32 len,
-			      unsigned flags)
+			      unsigned dispatch_flags)
 {
-	bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+	bool ppgtt = USES_PPGTT(ring->dev) &&
+		     !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -2416,8 +2420,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
-			     u64 offset, u32 len,
-			     unsigned flags)
+			      u64 offset, u32 len,
+			      unsigned dispatch_flags)
 {
 	int ret;
@@ -2427,7 +2431,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
-			(flags & I915_DISPATCH_SECURE ?
+			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
@@ -2439,7 +2443,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 			      u64 offset, u32 len,
-			      unsigned flags)
+			      unsigned dispatch_flags)
 {
 	int ret;
@@ -2449,7 +2453,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
-			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+			(dispatch_flags & I915_DISPATCH_SECURE ?
+			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);

drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -164,7 +164,7 @@ struct intel_engine_cs {
 					 u32 seqno);
 	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
 					       u64 offset, u32 length,
-					       unsigned flags);
+					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
 	void		(*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
 				  u32 flush_domains);
 	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
 					 struct intel_context *ctx,
-					 u64 offset, unsigned flags);
+					 u64 offset, unsigned dispatch_flags);
 	/**
 	 * List of objects currently involved in rendering from the