drm/i915: Make own struct for execlist items
Engine's execlist related items have been increasing to a point where
a separate struct is warranted. Carve execlist specific items to a
dedicated struct to add clarity.

v2: add kerneldoc and fix whitespace (Joonas, Chris)
v3: csb_mmio changes, rebase
v4: s/\b(el|execlist)\b/execlists/ (Joonas)

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com> (v3)
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170922124307.10914-1-mika.kuoppala@intel.com
parent d27ffc1d00
commit b620e87021
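Illustrative sketch (not part of the patch): after this change, per-engine execlist state is reached through engine->execlists. The helper name below is hypothetical; the members and the port_count() macro are the ones introduced by the diff that follows.

	static void example_kick_submission(struct intel_engine_cs *engine)
	{
		struct intel_engine_execlists * const execlists = &engine->execlists;

		/* Requests queued and room left in the two ELSP ports? */
		if (READ_ONCE(execlists->first) &&
		    port_count(&execlists->port[0]) + port_count(&execlists->port[1]) < 2)
			tasklet_hi_schedule(&execlists->irq_tasklet);
	}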
@@ -3323,7 +3323,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 			read = GEN8_CSB_READ_PTR(ptr);
 			write = GEN8_CSB_WRITE_PTR(ptr);
 			seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
-				   read, engine->csb_head,
+				   read, engine->execlists.csb_head,
 				   write,
 				   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
 				   yesno(test_bit(ENGINE_IRQ_EXECLIST,

@@ -3345,10 +3345,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 			}

 			rcu_read_lock();
-			for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
+			for (idx = 0; idx < ARRAY_SIZE(engine->execlists.port); idx++) {
 				unsigned int count;

-				rq = port_unpack(&engine->execlist_port[idx],
+				rq = port_unpack(&engine->execlists.port[idx],
 						 &count);
 				if (rq) {
 					seq_printf(m, "\t\tELSP[%d] count=%d, ",

@@ -3362,7 +3362,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 			rcu_read_unlock();

 			spin_lock_irq(&engine->timeline->lock);
-			for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
+			for (rb = engine->execlists.first; rb; rb = rb_next(rb)) {
 				struct i915_priolist *p =
 					rb_entry(rb, typeof(*p), node);

@@ -2815,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 	 * Turning off the engine->irq_tasklet until the reset is over
 	 * prevents the race.
 	 */
-	tasklet_kill(&engine->irq_tasklet);
-	tasklet_disable(&engine->irq_tasklet);
+	tasklet_kill(&engine->execlists.irq_tasklet);
+	tasklet_disable(&engine->execlists.irq_tasklet);

 	if (engine->irq_seqno_barrier)
 		engine->irq_seqno_barrier(engine);

@@ -2995,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)

 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-	tasklet_enable(&engine->irq_tasklet);
+	tasklet_enable(&engine->execlists.irq_tasklet);
 	kthread_unpark(engine->breadcrumbs.signaler);
 }

@@ -1327,10 +1327,10 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 static void error_record_engine_execlists(struct intel_engine_cs *engine,
 					  struct drm_i915_error_engine *ee)
 {
-	const struct execlist_port *port = engine->execlist_port;
+	const struct execlist_port *port = engine->execlists.port;
 	unsigned int n;

-	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+	for (n = 0; n < ARRAY_SIZE(engine->execlists.port); n++) {
 		struct drm_i915_gem_request *rq = port_request(&port[n]);

 		if (!rq)

@@ -494,11 +494,12 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client = guc->execbuf_client;
-	struct execlist_port *port = engine->execlist_port;
-	unsigned int engine_id = engine->id;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
+	const unsigned int engine_id = engine->id;
 	unsigned int n;

-	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+	for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
 		struct drm_i915_gem_request *rq;
 		unsigned int count;

@@ -558,7 +559,8 @@ static void port_assign(struct execlist_port *port,

 static void i915_guc_dequeue(struct intel_engine_cs *engine)
 {
-	struct execlist_port *port = engine->execlist_port;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *last = NULL;
 	bool submit = false;
 	struct rb_node *rb;

@@ -567,15 +569,15 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 		port++;

 	spin_lock_irq(&engine->timeline->lock);
-	rb = engine->execlist_first;
-	GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+	rb = execlists->first;
+	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 	while (rb) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		struct drm_i915_gem_request *rq, *rn;

 		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
 			if (last && rq->ctx != last->ctx) {
-				if (port != engine->execlist_port) {
+				if (port != execlists->port) {
 					__list_del_many(&p->requests,
 							&rq->priotree.link);
 					goto done;

@@ -596,13 +598,13 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 		}

 		rb = rb_next(rb);
-		rb_erase(&p->node, &engine->execlist_queue);
+		rb_erase(&p->node, &execlists->queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
 done:
-	engine->execlist_first = rb;
+	execlists->first = rb;
 	if (submit) {
 		port_assign(port, last);
 		i915_guc_submit(engine);

@@ -612,8 +614,8 @@ done:

 static void i915_guc_irq_handler(unsigned long data)
 {
-	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-	struct execlist_port *port = engine->execlist_port;
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct execlist_port *port = engine->execlists.port;
 	struct drm_i915_gem_request *rq;

 	rq = port_request(&port[0]);

@@ -1144,7 +1146,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	 * and it is guaranteed that it will remove the work item from the
 	 * queue before our request is completed.
 	 */
-	BUILD_BUG_ON(ARRAY_SIZE(engine->execlist_port) *
+	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
 		     sizeof(struct guc_wq_item) *
 		     I915_NUM_ENGINES > GUC_WQ_SIZE);

@@ -1175,14 +1177,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	guc_interrupts_capture(dev_priv);

 	for_each_engine(engine, dev_priv, id) {
+		struct intel_engine_execlists * const execlists = &engine->execlists;
 		/* The tasklet was initialised by execlists, and may be in
 		 * a state of flux (across a reset) and so we just want to
 		 * take over the callback without changing any other state
 		 * in the tasklet.
 		 */
-		engine->irq_tasklet.func = i915_guc_irq_handler;
+		execlists->irq_tasklet.func = i915_guc_irq_handler;
 		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-		tasklet_schedule(&engine->irq_tasklet);
+		tasklet_schedule(&execlists->irq_tasklet);
 	}

 	return 0;

@@ -1346,10 +1346,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void
 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	bool tasklet = false;

 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-		if (port_count(&engine->execlist_port[0])) {
+		if (port_count(&execlists->port[0])) {
 			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 			tasklet = true;
 		}

@@ -1361,7 +1362,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 	}

 	if (tasklet)
-		tasklet_hi_schedule(&engine->irq_tasklet);
+		tasklet_hi_schedule(&execlists->irq_tasklet);
 }

 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,

@@ -393,8 +393,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-	engine->execlist_queue = RB_ROOT;
-	engine->execlist_first = NULL;
+	engine->execlists.queue = RB_ROOT;
+	engine->execlists.first = NULL;

 	intel_engine_init_timeline(engine);
 	intel_engine_init_hangcheck(engine);

@@ -1475,11 +1475,11 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 		return false;

 	/* Both ports drained, no more ELSP submission? */
-	if (port_request(&engine->execlist_port[0]))
+	if (port_request(&engine->execlists.port[0]))
 		return false;

 	/* ELSP is empty, but there are ready requests? */
-	if (READ_ONCE(engine->execlist_first))
+	if (READ_ONCE(engine->execlists.first))
 		return false;

 	/* Ring stopped? */

@@ -1528,8 +1528,8 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id) {
 		intel_engine_disarm_breadcrumbs(engine);
 		i915_gem_batch_pool_fini(&engine->batch_pool);
-		tasklet_kill(&engine->irq_tasklet);
-		engine->no_priolist = false;
+		tasklet_kill(&engine->execlists.irq_tasklet);
+		engine->execlists.no_priolist = false;
 	}
 }

@@ -291,17 +291,18 @@ lookup_priolist(struct intel_engine_cs *engine,
 		struct i915_priotree *pt,
 		int prio)
 {
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_priolist *p;
 	struct rb_node **parent, *rb;
 	bool first = true;

-	if (unlikely(engine->no_priolist))
+	if (unlikely(execlists->no_priolist))
 		prio = I915_PRIORITY_NORMAL;

 find_priolist:
 	/* most positive priority is scheduled first, equal priorities fifo */
 	rb = NULL;
-	parent = &engine->execlist_queue.rb_node;
+	parent = &execlists->queue.rb_node;
 	while (*parent) {
 		rb = *parent;
 		p = rb_entry(rb, typeof(*p), node);

@@ -316,7 +317,7 @@ find_priolist:
 	}

 	if (prio == I915_PRIORITY_NORMAL) {
-		p = &engine->default_priolist;
+		p = &execlists->default_priolist;
 	} else {
 		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */

@@ -331,7 +332,7 @@ find_priolist:
 			 * requests, so if userspace lied about their
 			 * dependencies that reordering may be visible.
 			 */
-			engine->no_priolist = true;
+			execlists->no_priolist = true;
 			goto find_priolist;
 		}
 	}

@@ -339,10 +340,10 @@ find_priolist:
 	p->priority = prio;
 	INIT_LIST_HEAD(&p->requests);
 	rb_link_node(&p->node, rb, parent);
-	rb_insert_color(&p->node, &engine->execlist_queue);
+	rb_insert_color(&p->node, &execlists->queue);

 	if (first)
-		engine->execlist_first = &p->node;
+		execlists->first = &p->node;

 	return ptr_pack_bits(p, first, 1);
 }

@@ -393,12 +394,12 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)

 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-	struct execlist_port *port = engine->execlist_port;
+	struct execlist_port *port = engine->execlists.port;
 	u32 __iomem *elsp =
 		engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
 	unsigned int n;

-	for (n = ARRAY_SIZE(engine->execlist_port); n--; ) {
+	for (n = ARRAY_SIZE(engine->execlists.port); n--; ) {
 		struct drm_i915_gem_request *rq;
 		unsigned int count;
 		u64 desc;

@@ -453,7 +454,7 @@ static void port_assign(struct execlist_port *port,
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *last;
-	struct execlist_port *port = engine->execlist_port;
+	struct execlist_port *port = engine->execlists.port;
 	struct rb_node *rb;
 	bool submit = false;

@@ -491,8 +492,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */

 	spin_lock_irq(&engine->timeline->lock);
-	rb = engine->execlist_first;
-	GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+	rb = engine->execlists.first;
+	GEM_BUG_ON(rb_first(&engine->execlists.queue) != rb);
 	while (rb) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		struct drm_i915_gem_request *rq, *rn;

@@ -515,7 +516,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * combine this request with the last, then we
 				 * are done.
 				 */
-				if (port != engine->execlist_port) {
+				if (port != engine->execlists.port) {
 					__list_del_many(&p->requests,
 							&rq->priotree.link);
 					goto done;

@@ -552,13 +553,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		}

 		rb = rb_next(rb);
-		rb_erase(&p->node, &engine->execlist_queue);
+		rb_erase(&p->node, &engine->execlists.queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
 done:
-	engine->execlist_first = rb;
+	engine->execlists.first = rb;
 	if (submit)
 		port_assign(port, last);
 	spin_unlock_irq(&engine->timeline->lock);

@@ -569,7 +570,8 @@ done:

 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
-	struct execlist_port *port = engine->execlist_port;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;

@@ -578,9 +580,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->timeline->lock, flags);

 	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+	for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
 		i915_gem_request_put(port_request(&port[n]));
-	memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+	memset(execlists->port, 0, sizeof(execlists->port));

 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {

@@ -590,7 +592,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	}

 	/* Flush the queued requests to the timeline list (for retiring). */
-	rb = engine->execlist_first;
+	rb = execlists->first;
 	while (rb) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);

@@ -603,7 +605,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		}

 		rb = rb_next(rb);
-		rb_erase(&p->node, &engine->execlist_queue);
+		rb_erase(&p->node, &execlists->queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);

@@ -611,8 +613,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)

 	/* Remaining _unready_ requests will be nop'ed when submitted */

-	engine->execlist_queue = RB_ROOT;
-	engine->execlist_first = NULL;
+	execlists->queue = RB_ROOT;
+	execlists->first = NULL;
 	GEM_BUG_ON(port_isset(&port[0]));

 	/*

@@ -628,7 +630,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)

 static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
 {
-	const struct execlist_port *port = engine->execlist_port;
+	const struct execlist_port *port = engine->execlists.port;

 	return port_count(&port[0]) + port_count(&port[1]) < 2;
 }

@@ -639,8 +641,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
  */
 static void intel_lrc_irq_handler(unsigned long data)
 {
-	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-	struct execlist_port *port = engine->execlist_port;
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
 	struct drm_i915_private *dev_priv = engine->i915;

 	/* We can skip acquiring intel_runtime_pm_get() here as it was taken

@@ -652,7 +655,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 	 */
 	GEM_BUG_ON(!dev_priv->gt.awake);

-	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
+	intel_uncore_forcewake_get(dev_priv, execlists->fw_domains);

 	/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
 	 * imposing the cost of a locked atomic transaction when submitting a

@@ -665,10 +668,10 @@ static void intel_lrc_irq_handler(unsigned long data)
 		unsigned int head, tail;

 		/* However GVT emulation depends upon intercepting CSB mmio */
-		if (unlikely(engine->csb_use_mmio)) {
+		if (unlikely(execlists->csb_use_mmio)) {
 			buf = (u32 * __force)
 				(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
-			engine->csb_head = -1; /* force mmio read of CSB ptrs */
+			execlists->csb_head = -1; /* force mmio read of CSB ptrs */
 		}

 		/* The write will be ordered by the uncached read (itself

@@ -682,19 +685,20 @@ static void intel_lrc_irq_handler(unsigned long data)
 		 * is set and we do a new loop.
 		 */
 		__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-		if (unlikely(engine->csb_head == -1)) { /* following a reset */
+		if (unlikely(execlists->csb_head == -1)) { /* following a reset */
 			head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
 			tail = GEN8_CSB_WRITE_PTR(head);
 			head = GEN8_CSB_READ_PTR(head);
-			engine->csb_head = head;
+			execlists->csb_head = head;
 		} else {
 			const int write_idx =
 				intel_hws_csb_write_index(dev_priv) -
 				I915_HWS_CSB_BUF0_INDEX;

-			head = engine->csb_head;
+			head = execlists->csb_head;
 			tail = READ_ONCE(buf[write_idx]);
 		}

 		while (head != tail) {
 			struct drm_i915_gem_request *rq;
 			unsigned int status;

@@ -748,8 +752,8 @@ static void intel_lrc_irq_handler(unsigned long data)
 				  !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
 		}

-		if (head != engine->csb_head) {
-			engine->csb_head = head;
+		if (head != execlists->csb_head) {
+			execlists->csb_head = head;
 			writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
 			       dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
 		}

@@ -758,7 +762,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 	if (execlists_elsp_ready(engine))
 		execlists_dequeue(engine);

-	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+	intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
 }

 static void insert_request(struct intel_engine_cs *engine,

@@ -769,7 +773,7 @@ static void insert_request(struct intel_engine_cs *engine,

 	list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
 	if (ptr_unmask_bits(p, 1) && execlists_elsp_ready(engine))
-		tasklet_hi_schedule(&engine->irq_tasklet);
+		tasklet_hi_schedule(&engine->execlists.irq_tasklet);
 }

 static void execlists_submit_request(struct drm_i915_gem_request *request)

@@ -782,7 +786,7 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)

 	insert_request(engine, &request->priotree, request->priotree.priority);

-	GEM_BUG_ON(!engine->execlist_first);
+	GEM_BUG_ON(!engine->execlists.first);
 	GEM_BUG_ON(list_empty(&request->priotree.link));

 	spin_unlock_irqrestore(&engine->timeline->lock, flags);

@@ -1289,6 +1293,7 @@ static u8 gtiir[] = {
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	int ret;

 	ret = intel_mocs_init_engine(engine);

@@ -1321,11 +1326,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 	I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
 		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-	engine->csb_head = -1;
+	execlists->csb_head = -1;

 	/* After a GPU reset, we may have requests to replay */
-	if (!i915_modparams.enable_guc_submission && engine->execlist_first)
-		tasklet_schedule(&engine->irq_tasklet);
+	if (!i915_modparams.enable_guc_submission && execlists->first)
+		tasklet_schedule(&execlists->irq_tasklet);

 	return 0;
 }

@@ -1366,7 +1371,8 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
-	struct execlist_port *port = engine->execlist_port;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq, *rn;
 	struct intel_context *ce;
 	unsigned long flags;

@@ -1383,9 +1389,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * guessing the missed context-switch events by looking at what
 	 * requests were completed.
 	 */
-	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+	for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
 		i915_gem_request_put(port_request(&port[n]));
-	memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+	memset(execlists->port, 0, sizeof(execlists->port));

 	/* Push back any incomplete requests for replay after the reset. */
 	list_for_each_entry_safe_reverse(rq, rn,

@@ -1719,8 +1725,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	 * Tasklet cannot be active at this point due intel_mark_active/idle
 	 * so this is just for documentation.
 	 */
-	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
-		tasklet_kill(&engine->irq_tasklet);
+	if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
+		tasklet_kill(&engine->execlists.irq_tasklet);

 	dev_priv = engine->i915;

@@ -1744,7 +1750,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->submit_request = execlists_submit_request;
 	engine->cancel_requests = execlists_cancel_requests;
 	engine->schedule = execlists_schedule;
-	engine->irq_tasklet.func = intel_lrc_irq_handler;
+	engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
 }

 static void

@@ -1806,7 +1812,7 @@ logical_ring_setup(struct intel_engine_cs *engine)
 	/* Intentionally left blank. */
 	engine->buffer = NULL;

-	engine->csb_use_mmio = irq_handler_force_mmio(dev_priv);
+	engine->execlists.csb_use_mmio = irq_handler_force_mmio(dev_priv);

 	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
 						    RING_ELSP(engine),

@@ -1820,9 +1826,9 @@ logical_ring_setup(struct intel_engine_cs *engine)
 						    RING_CONTEXT_STATUS_BUF_BASE(engine),
 						    FW_REG_READ);

-	engine->fw_domains = fw_domains;
+	engine->execlists.fw_domains = fw_domains;

-	tasklet_init(&engine->irq_tasklet,
+	tasklet_init(&engine->execlists.irq_tasklet,
 		     intel_lrc_irq_handler, (unsigned long)engine);

 	logical_ring_default_vfuncs(engine);

@@ -184,6 +184,84 @@ struct i915_priolist {
 	int priority;
 };

+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+	/**
+	 * @irq_tasklet: softirq tasklet for bottom handler
+	 */
+	struct tasklet_struct irq_tasklet;
+
+	/**
+	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
+	 */
+	struct i915_priolist default_priolist;
+
+	/**
+	 * @no_priolist: priority lists disabled
+	 */
+	bool no_priolist;
+
+	/**
+	 * @port: execlist port states
+	 *
+	 * For each hardware ELSP (ExecList Submission Port) we keep
+	 * track of the last request and the number of times we submitted
+	 * that port to hw. We then count the number of times the hw reports
+	 * a context completion or preemption. As only one context can
+	 * be active on hw, we limit resubmission of context to port[0]. This
+	 * is called Lite Restore, of the context.
+	 */
+	struct execlist_port {
+		/**
+		 * @request_count: combined request and submission count
+		 */
+		struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, e) ((p) - (e)->execlists.port)
+
+		/**
+		 * @context_id: context ID for port
+		 */
+		GEM_DEBUG_DECL(u32 context_id);
+	} port[2];
+
+	/**
+	 * @queue: queue of requests, in priority lists
+	 */
+	struct rb_root queue;
+
+	/**
+	 * @first: leftmost level in priority @queue
+	 */
+	struct rb_node *first;
+
+	/**
+	 * @fw_domains: forcewake domains for irq tasklet
+	 */
+	unsigned int fw_domains;
+
+	/**
+	 * @csb_head: context status buffer head
+	 */
+	unsigned int csb_head;
+
+	/**
+	 * @csb_use_mmio: access csb through mmio, instead of hwsp
+	 */
+	bool csb_use_mmio;
+};
+
 #define INTEL_ENGINE_CS_MAX_NAME 8

 struct intel_engine_cs {

@@ -380,27 +458,7 @@ struct intel_engine_cs {
 		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
 	} semaphore;

-	/* Execlists */
-	struct tasklet_struct irq_tasklet;
-	struct i915_priolist default_priolist;
-	bool no_priolist;
-	struct execlist_port {
-		struct drm_i915_gem_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlist_port)
-		GEM_DEBUG_DECL(u32 context_id);
-	} execlist_port[2];
-	struct rb_root execlist_queue;
-	struct rb_node *execlist_first;
-	unsigned int fw_domains;
-	unsigned int csb_head;
-	bool csb_use_mmio;
+	struct intel_engine_execlists execlists;

 	/* Contexts are pinned whilst they are active on the GPU. The last
 	 * context executed remains active whilst the GPU is idle - the