drm/i915: Rename request->ringbuf to request->ring
Now that we have disambiguated ring and engine, we can use the clearer
and more consistent name for the intel_ringbuffer pointer in the
request.

@@
struct drm_i915_gem_request *r;
@@
- r->ringbuf
+ r->ring

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-12-git-send-email-chris@chris-wilson.co.uk
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-2-git-send-email-chris@chris-wilson.co.uk
commit 1dae2dfb0b
parent b5321f309b
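The semantic patch above captures the whole change: only the field name
moves, not its type. A minimal compilable sketch of the before/after
shape, using stub types in place of the real i915 headers (the helper
request_tail() is hypothetical, for illustration only):

/* Stub stand-ins for the real driver types; sketch only. */
typedef unsigned int u32;

struct i915_gem_context;
struct intel_engine_cs;
struct intel_ringbuffer { u32 head, tail, size, space; };

struct drm_i915_gem_request {
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ringbuffer *ring;	/* was: struct intel_ringbuffer *ringbuf; */
};

/* Callers follow mechanically, e.g. (hypothetical helper): */
static u32 request_tail(const struct drm_i915_gem_request *req)
{
	return req->ring->tail;	/* was: req->ringbuf->tail */
}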
drivers/gpu/drm/i915/i915_gem_context.c
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
@@ -655,7 +655,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ringbuffer *ring = params->request->ringbuf;
+		struct intel_ringbuffer *ring = params->request->ring;
 
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1661,7 +1661,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
drivers/gpu/drm/i915/i915_gem_request.c
@@ -170,7 +170,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
	 * Note this requires that we are always called in request
	 * completion order.
	 */
-	request->ringbuf->last_retired_head = request->postfix;
+	request->ring->last_retired_head = request->postfix;
 
	i915_gem_request_remove_from_client(request);
 
@@ -423,7 +423,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
			bool flush_caches)
 {
	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;
@@ -432,14 +432,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
		return;
 
	engine = request->engine;
-	ringbuf = request->ringbuf;
+	ring = request->ring;
 
	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
-	request_start = intel_ring_get_tail(ringbuf);
+	request_start = intel_ring_get_tail(ring);
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;
 
@@ -486,21 +486,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
+	request->postfix = intel_ring_get_tail(ring);
 
	if (i915.enable_execlists) {
		ret = engine->emit_request(request);
	} else {
		ret = engine->add_request(request);
 
-		request->tail = intel_ring_get_tail(ringbuf);
+		request->tail = intel_ring_get_tail(ring);
	}
	/* Not allowed to fail! */
	WARN(ret, "emit|add_request failed: %d!\n", ret);
	/* Sanity check that the reserved size was large enough. */
-	ret = intel_ring_get_tail(ringbuf) - request_start;
+	ret = intel_ring_get_tail(ring) - request_start;
	if (ret < 0)
-		ret += ringbuf->size;
+		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
drivers/gpu/drm/i915/i915_gem_request.h
@@ -61,7 +61,7 @@ struct drm_i915_gem_request {
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
	struct intel_signal_node signaling;
 
	/** GEM sequence number associated with the previous request,
drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1097,7 +1097,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
		request = i915_gem_find_active_request(engine);
		if (request) {
			struct i915_address_space *vm;
-			struct intel_ringbuffer *rb;
+			struct intel_ringbuffer *ring;
 
			vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;
@@ -1114,7 +1114,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
-							 engine->scratch.obj);
+						      engine->scratch.obj);
 
			if (request->pid) {
				struct task_struct *task;
@@ -1131,23 +1131,20 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-			rb = request->ringbuf;
-			ee->cpu_ring_head = rb->head;
-			ee->cpu_ring_tail = rb->tail;
+			ring = request->ring;
+			ee->cpu_ring_head = ring->head;
+			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_ggtt_object_create(dev_priv,
-							      rb->obj);
+							      ring->obj);
		}
 
		ee->hws_page =
			i915_error_ggtt_object_create(dev_priv,
						      engine->status_page.obj);
 
-		if (engine->wa_ctx.obj) {
-			ee->wa_ctx =
-				i915_error_ggtt_object_create(dev_priv,
-							      engine->wa_ctx.obj);
-		}
+		ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
							   engine->wa_ctx.obj);
 
		i915_gem_record_active_context(engine, error, ee);
 
drivers/gpu/drm/i915/intel_display.c
@@ -11115,7 +11115,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;
@@ -11149,7 +11149,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;
@@ -11180,7 +11180,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
@@ -11218,7 +11218,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
@@ -11253,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;
drivers/gpu/drm/i915/intel_lrc.c
@@ -714,7 +714,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
		return ret;
	}
 
-	request->ringbuf = ce->ringbuf;
+	request->ring = ce->ringbuf;
 
	if (i915.enable_guc_submission) {
		/*
@@ -770,11 +770,11 @@ err_unpin:
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;
 
-	intel_ring_advance(ringbuf);
-	request->tail = ringbuf->tail;
+	intel_ring_advance(ring);
+	request->tail = ring->tail;
 
	/*
	 * Here we add two extra NOOPs as padding to avoid
@@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
	/* We keep the previous context alive until we retire the following
	 * request. This ensures that any the context object is still pinned
@@ -821,7 +821,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
+	struct intel_ringbuffer *ring = params->request->ring;
	u64 exec_start;
	int instp_mode;
	u32 instp_mask;
@@ -833,7 +833,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+		if (instp_mode != 0 && engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}
@@ -862,17 +862,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
	if (ret)
		return ret;
 
-	if (engine == &dev_priv->engine[RCS] &&
+	if (engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;
 
-		intel_ring_emit(ringbuf, MI_NOOP);
-		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ringbuf, INSTPM);
-		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ringbuf);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
		dev_priv->relative_constants_mode = instp_mode;
	}
@@ -1030,7 +1030,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;
 
	if (w->count == 0)
@@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
	if (ret)
		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
-		intel_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
	}
-	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
@@ -1553,7 +1553,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
	int i, ret;
@@ -1581,7 +1581,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;
 
@@ -1638,8 +1638,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
			   u32 invalidate_domains,
			   u32 unused)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
-	struct intel_engine_cs *engine = ring->engine;
+	struct intel_ringbuffer *ring = request->ring;
	uint32_t cmd;
	int ret;
 
@@ -1658,7 +1657,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
		cmd |= MI_INVALIDATE_TLB;
-		if (engine->id == VCS)
+		if (request->engine->id == VCS)
			cmd |= MI_INVALIDATE_BSD;
	}
 
@@ -1677,7 +1676,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
				  u32 invalidate_domains,
				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	bool vf_flush_wa = false, dc_flush_wa = false;
@@ -1791,7 +1790,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
	int ret;
 
	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1814,7 +1813,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
	int ret;
 
	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
drivers/gpu/drm/i915/intel_mocs.c
@@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	enum intel_engine_id engine = req->engine->id;
	unsigned int index;
	int ret;
@@ -288,11 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
	if (ret)
		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
-		intel_ring_emit(ringbuf, table->table[index].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[index].control_value);
	}
 
	/*
@@ -304,12 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
	 * that value to all the used entries.
	 */
	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
-		intel_ring_emit(ringbuf, table->table[0].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[0].control_value);
	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
	return 0;
 }
@@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	unsigned int i;
	int ret;
 
@@ -347,18 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
	if (ret)
		return ret;
 
-	intel_ring_emit(ringbuf,
+	intel_ring_emit(ring,
			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
	}
 
	if (table->size & 0x01) {
		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
		i++;
	}
 
@@ -368,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
	 * they are reserved by the hardware.
	 */
	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
	return 0;
 }
drivers/gpu/drm/i915/intel_overlay.c
@@ -253,7 +253,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
	overlay->active = true;
 
-	ring = req->ringbuf;
+	ring = req->ring;
	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -295,7 +295,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
		return ret;
	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
	intel_ring_emit(ring, flip_addr);
	intel_ring_advance(ring);
@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
		return ret;
	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
	/* wait for overlay to go idle */
	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
	intel_ring_emit(ring, flip_addr);
@@ -438,7 +438,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
		return ret;
	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
	intel_ring_emit(ring,
			MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
	intel_ring_emit(ring, MI_NOOP);
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	u32 cmd;
	int ret;
 
@@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	u32 cmd;
	int ret;
 
@@ -187,7 +187,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	u32 scratch_addr =
		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;
@@ -224,7 +224,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	u32 scratch_addr =
		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
@@ -277,7 +277,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 4);
@@ -299,7 +299,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	u32 scratch_addr =
		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	u32 flags = 0;
@@ -364,7 +364,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 6);
@@ -680,7 +680,7 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;
	int ret, i;
 
@@ -1338,7 +1338,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
	struct drm_i915_private *dev_priv = signaller_req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
@@ -1380,7 +1380,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
	struct drm_i915_private *dev_priv = signaller_req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;
@@ -1419,7 +1419,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
	struct drm_i915_private *dev_priv = signaller_req->i915;
	struct intel_engine_cs *useless;
	enum intel_engine_id id;
@@ -1464,7 +1464,7 @@ static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	if (engine->semaphore.signal)
@@ -1488,7 +1488,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	if (engine->semaphore.signal)
@@ -1533,7 +1533,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
	struct drm_i915_private *dev_priv = waiter_req->i915;
	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
	struct i915_hw_ppgtt *ppgtt;
@@ -1567,7 +1567,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
@@ -1701,7 +1701,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
	       u32 invalidate_domains,
	       u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 2);
@@ -1717,7 +1717,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 4);
@@ -1795,7 +1795,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 length,
			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 2);
@@ -1822,7 +1822,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	u32 cs_offset = req->engine->scratch.gtt_offset;
	int ret;
 
@@ -1884,7 +1884,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 2);
@@ -2312,7 +2312,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	request->ringbuf = request->engine->buffer;
+	request->ring = request->engine->buffer;
 
	ret = intel_ring_begin(request, 0);
	if (ret)
@@ -2324,12 +2324,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *target;
 
-	intel_ring_update_space(ringbuf);
-	if (ringbuf->space >= bytes)
+	intel_ring_update_space(ring);
+	if (ring->space >= bytes)
		return 0;
 
	/*
@@ -2351,12 +2351,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
		 * from multiple ringbuffers. Here, we must ignore any that
		 * aren't from the ringbuffer we're considering.
		 */
-		if (target->ringbuf != ringbuf)
+		if (target->ring != ring)
			continue;
 
		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
+		space = __intel_ring_space(target->postfix, ring->tail,
+					   ring->size);
		if (space >= bytes)
			break;
	}
@@ -2369,9 +2369,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_actual = ringbuf->size - ringbuf->tail;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	struct intel_ringbuffer *ring = req->ring;
+	int remain_actual = ring->size - ring->tail;
+	int remain_usable = ring->effective_size - ring->tail;
	int bytes = num_dwords * sizeof(u32);
	int total_bytes, wait_bytes;
	bool need_wrap = false;
@@ -2398,35 +2398,35 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
		wait_bytes = total_bytes;
	}
 
-	if (wait_bytes > ringbuf->space) {
+	if (wait_bytes > ring->space) {
		int ret = wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ret;
 
-		intel_ring_update_space(ringbuf);
-		if (unlikely(ringbuf->space < wait_bytes))
+		intel_ring_update_space(ring);
+		if (unlikely(ring->space < wait_bytes))
			return -EAGAIN;
	}
 
	if (unlikely(need_wrap)) {
-		GEM_BUG_ON(remain_actual > ringbuf->space);
-		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+		GEM_BUG_ON(remain_actual > ring->space);
+		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
 
		/* Fill the tail with MI_NOOP */
-		memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
-		ringbuf->tail = 0;
-		ringbuf->space -= remain_actual;
+		memset(ring->vaddr + ring->tail, 0, remain_actual);
+		ring->tail = 0;
+		ring->space -= remain_actual;
	}
 
-	ringbuf->space -= bytes;
-	GEM_BUG_ON(ringbuf->space < 0);
+	ring->space -= bytes;
+	GEM_BUG_ON(ring->space < 0);
	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int num_dwords =
		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;
@@ -2533,7 +2533,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	uint32_t cmd;
	int ret;
 
@@ -2579,7 +2579,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	bool ppgtt = USES_PPGTT(req->i915) &&
		     !(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;
@@ -2605,7 +2605,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			     u64 offset, u32 len,
			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 2);
@@ -2630,7 +2630,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	int ret;
 
	ret = intel_ring_begin(req, 2);
@@ -2653,7 +2653,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
	uint32_t cmd;
	int ret;
 