2018-10-01 22:47:54 +08:00
|
|
|
/*
|
|
|
|
* SPDX-License-Identifier: MIT
|
|
|
|
*
|
|
|
|
* Copyright © 2018 Intel Corporation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
|
|
|
|
#include "i915_drv.h"
|
2019-03-06 05:38:30 +08:00
|
|
|
#include "i915_globals.h"
|
2018-10-01 22:47:54 +08:00
|
|
|
#include "i915_request.h"
|
|
|
|
#include "i915_scheduler.h"
|
|
|
|
|
2019-02-28 18:20:33 +08:00
|
|
|
static struct i915_global_scheduler {
|
2019-03-06 05:38:30 +08:00
|
|
|
struct i915_global base;
|
2019-02-28 18:20:33 +08:00
|
|
|
struct kmem_cache *slab_dependencies;
|
|
|
|
struct kmem_cache *slab_priorities;
|
|
|
|
} global;
|
|
|
|
|
2018-10-01 22:47:54 +08:00
|
|
|
/*
 * Serialises priority propagation: __i915_schedule() asserts this lock is
 * held while it uses the temporary dfs_link inside struct i915_dependency.
 */
static DEFINE_SPINLOCK(schedule_lock);
|
|
|
|
|
|
|
|
static const struct i915_request *
|
|
|
|
node_to_request(const struct i915_sched_node *node)
|
|
|
|
{
|
|
|
|
return container_of(node, const struct i915_request, sched);
|
|
|
|
}
|
|
|
|
|
2019-02-26 18:23:54 +08:00
|
|
|
/* Has the request owning this node already begun execution on HW? */
static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}
|
|
|
|
|
2018-10-01 22:47:54 +08:00
|
|
|
/* Has the request owning this node already completed (fence signaled)? */
static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}
|
|
|
|
|
|
|
|
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
|
|
|
|
{
|
|
|
|
return rb_entry(rb, struct i915_priolist, node);
|
|
|
|
}
|
|
|
|
|
2019-01-30 02:54:51 +08:00
|
|
|
static void assert_priolists(struct intel_engine_execlists * const execlists)
|
2018-10-01 22:47:54 +08:00
|
|
|
{
|
|
|
|
struct rb_node *rb;
|
|
|
|
long last_prio, i;
|
|
|
|
|
|
|
|
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
|
|
|
|
return;
|
|
|
|
|
|
|
|
GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
|
|
|
|
rb_first(&execlists->queue.rb_root));
|
|
|
|
|
2019-01-30 02:54:51 +08:00
|
|
|
last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
|
2018-10-01 22:47:54 +08:00
|
|
|
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
|
|
|
|
const struct i915_priolist *p = to_priolist(rb);
|
|
|
|
|
|
|
|
GEM_BUG_ON(p->priority >= last_prio);
|
|
|
|
last_prio = p->priority;
|
|
|
|
|
|
|
|
GEM_BUG_ON(!p->used);
|
|
|
|
for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
|
|
|
|
if (list_empty(&p->requests[i]))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
GEM_BUG_ON(!(p->used & BIT(i)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
struct list_head *
|
|
|
|
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
|
|
|
|
{
|
|
|
|
struct intel_engine_execlists * const execlists = &engine->execlists;
|
|
|
|
struct i915_priolist *p;
|
|
|
|
struct rb_node **parent, *rb;
|
|
|
|
bool first = true;
|
|
|
|
int idx, i;
|
|
|
|
|
2019-06-15 00:46:06 +08:00
|
|
|
lockdep_assert_held(&engine->active.lock);
|
2019-01-30 02:54:51 +08:00
|
|
|
assert_priolists(execlists);
|
2018-10-01 22:47:54 +08:00
|
|
|
|
|
|
|
/* buckets sorted from highest [in slot 0] to lowest priority */
|
|
|
|
idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
|
|
|
|
prio >>= I915_USER_PRIORITY_SHIFT;
|
|
|
|
if (unlikely(execlists->no_priolist))
|
|
|
|
prio = I915_PRIORITY_NORMAL;
|
|
|
|
|
|
|
|
find_priolist:
|
|
|
|
/* most positive priority is scheduled first, equal priorities fifo */
|
|
|
|
rb = NULL;
|
|
|
|
parent = &execlists->queue.rb_root.rb_node;
|
|
|
|
while (*parent) {
|
|
|
|
rb = *parent;
|
|
|
|
p = to_priolist(rb);
|
|
|
|
if (prio > p->priority) {
|
|
|
|
parent = &rb->rb_left;
|
|
|
|
} else if (prio < p->priority) {
|
|
|
|
parent = &rb->rb_right;
|
|
|
|
first = false;
|
|
|
|
} else {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (prio == I915_PRIORITY_NORMAL) {
|
|
|
|
p = &execlists->default_priolist;
|
|
|
|
} else {
|
2019-02-28 18:20:33 +08:00
|
|
|
p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
|
2018-10-01 22:47:54 +08:00
|
|
|
/* Convert an allocation failure to a priority bump */
|
|
|
|
if (unlikely(!p)) {
|
|
|
|
prio = I915_PRIORITY_NORMAL; /* recurses just once */
|
|
|
|
|
|
|
|
/* To maintain ordering with all rendering, after an
|
|
|
|
* allocation failure we have to disable all scheduling.
|
|
|
|
* Requests will then be executed in fifo, and schedule
|
|
|
|
* will ensure that dependencies are emitted in fifo.
|
|
|
|
* There will be still some reordering with existing
|
|
|
|
* requests, so if userspace lied about their
|
|
|
|
* dependencies that reordering may be visible.
|
|
|
|
*/
|
|
|
|
execlists->no_priolist = true;
|
|
|
|
goto find_priolist;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
p->priority = prio;
|
|
|
|
for (i = 0; i < ARRAY_SIZE(p->requests); i++)
|
|
|
|
INIT_LIST_HEAD(&p->requests[i]);
|
|
|
|
rb_link_node(&p->node, rb, parent);
|
|
|
|
rb_insert_color_cached(&p->node, &execlists->queue, first);
|
|
|
|
p->used = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
p->used |= BIT(idx);
|
|
|
|
return &p->requests[idx];
|
|
|
|
}
|
|
|
|
|
2019-05-13 20:01:00 +08:00
|
|
|
void __i915_priolist_free(struct i915_priolist *p)
|
|
|
|
{
|
|
|
|
kmem_cache_free(global.slab_priorities, p);
|
|
|
|
}
|
|
|
|
|
2019-02-12 04:46:47 +08:00
|
|
|
/*
 * Per-walk cache of the last priolist looked up, valid only while the
 * current engine lock is held (reset by sched_lock_engine() whenever
 * the lock is dropped).
 */
struct sched_cache {
	struct list_head *priolist;
};
|
|
|
|
|
2018-10-01 22:47:54 +08:00
|
|
|
static struct intel_engine_cs *
|
2019-02-12 04:46:47 +08:00
|
|
|
sched_lock_engine(const struct i915_sched_node *node,
|
|
|
|
struct intel_engine_cs *locked,
|
|
|
|
struct sched_cache *cache)
|
2018-10-01 22:47:54 +08:00
|
|
|
{
|
drm/i915: Load balancing across a virtual engine
Having allowed the user to define a set of engines that they will want
to only use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner as best to
distribute load. The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so, wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
A couple of areas for potential improvement left!
- The virtual engine always take priority over equal-priority tasks.
Mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).
- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.
- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.
Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.
sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).
v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is an virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)
Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
2019-05-22 05:11:30 +08:00
|
|
|
const struct i915_request *rq = node_to_request(node);
|
|
|
|
struct intel_engine_cs *engine;
|
2018-10-01 22:47:54 +08:00
|
|
|
|
|
|
|
GEM_BUG_ON(!locked);
|
|
|
|
|
drm/i915: Load balancing across a virtual engine
Having allowed the user to define a set of engines that they will want
to only use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner as best to
distribute load. The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so, wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
A couple of areas for potential improvement left!
- The virtual engine always take priority over equal-priority tasks.
Mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).
- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.
- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.
Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.
sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).
v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is an virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)
Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
2019-05-22 05:11:30 +08:00
|
|
|
/*
|
|
|
|
* Virtual engines complicate acquiring the engine timeline lock,
|
|
|
|
* as their rq->engine pointer is not stable until under that
|
|
|
|
* engine lock. The simple ploy we use is to take the lock then
|
|
|
|
* check that the rq still belongs to the newly locked engine.
|
|
|
|
*/
|
|
|
|
while (locked != (engine = READ_ONCE(rq->engine))) {
|
2019-06-15 00:46:06 +08:00
|
|
|
spin_unlock(&locked->active.lock);
|
2019-02-12 04:46:47 +08:00
|
|
|
memset(cache, 0, sizeof(*cache));
|
2019-06-15 00:46:06 +08:00
|
|
|
spin_lock(&engine->active.lock);
|
drm/i915: Load balancing across a virtual engine
Having allowed the user to define a set of engines that they will want
to only use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner as best to
distribute load. The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so, wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
A couple of areas for potential improvement left!
- The virtual engine always take priority over equal-priority tasks.
Mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).
- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.
- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.
Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.
sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).
v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is an virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)
Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
2019-05-22 05:11:30 +08:00
|
|
|
locked = engine;
|
2018-10-01 22:47:54 +08:00
|
|
|
}
|
|
|
|
|
drm/i915: Load balancing across a virtual engine
Having allowed the user to define a set of engines that they will want
to only use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner as best to
distribute load. The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so, wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
A couple of areas for potential improvement left!
- The virtual engine always take priority over equal-priority tasks.
Mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).
- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.
- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.
Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.
sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).
v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is an virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)
Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
2019-05-22 05:11:30 +08:00
|
|
|
GEM_BUG_ON(locked != engine);
|
|
|
|
return locked;
|
2018-10-01 22:47:54 +08:00
|
|
|
}
|
|
|
|
|
2019-05-07 20:25:44 +08:00
|
|
|
static inline int rq_prio(const struct i915_request *rq)
|
drm/i915/execlists: Suppress preempting self
In order to avoid preempting ourselves, we currently refuse to schedule
the tasklet if we reschedule an inflight context. However, this glosses
over a few issues such as what happens after a CS completion event and
we then preempt the newly executing context with itself, or if something
else causes a tasklet_schedule triggering the same evaluation to
preempt the active context with itself.
However, when we avoid preempting ELSP[0], we still retain the preemption
value as it may match a second preemption request within the same time period
that we need to resolve after the next CS event. However, since we only
store the maximum preemption priority seen, it may not match the
subsequent event and so we should double check whether or not we
actually do need to trigger a preempt-to-idle by comparing the top
priorities from each queue. Later, this gives us a hook for finer
control over deciding whether the preempt-to-idle is justified.
The sequence of events where we end up preempting for no avail is:
1. Queue requests/contexts A, B
2. Priority boost A; no preemption as it is executing, but keep hint
3. After CS switch, B is less than hint, force preempt-to-idle
4. Resubmit B after idling
v2: We can simplify a bunch of tests based on the knowledge that PI will
ensure that earlier requests along the same context will have the highest
priority.
v3: Demonstrate the stale preemption hint with a selftest
References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129185452.20989-4-chris@chris-wilson.co.uk
2019-01-30 02:54:52 +08:00
|
|
|
{
|
2019-05-07 20:25:44 +08:00
|
|
|
return rq->sched.attr.priority | __NO_PREEMPTION;
|
|
|
|
}
|
|
|
|
|
2019-10-18 15:20:27 +08:00
|
|
|
static inline bool need_preempt(int prio, int active)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Allow preemption of low -> normal -> high, but we do
|
|
|
|
* not allow low priority tasks to preempt other low priority
|
|
|
|
* tasks under the impression that latency for low priority
|
|
|
|
* tasks does not matter (as much as background throughput),
|
|
|
|
* so kiss.
|
|
|
|
*/
|
|
|
|
return prio >= max(I915_PRIORITY_NORMAL, active);
|
|
|
|
}
|
|
|
|
|
2019-10-21 16:02:11 +08:00
|
|
|
static void kick_submission(struct intel_engine_cs *engine,
|
|
|
|
const struct i915_request *rq,
|
|
|
|
int prio)
|
2019-05-07 20:25:44 +08:00
|
|
|
{
|
2019-10-21 16:02:11 +08:00
|
|
|
const struct i915_request *inflight;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only need to kick the tasklet once for the high priority
|
|
|
|
* new context we add into the queue.
|
|
|
|
*/
|
|
|
|
if (prio <= engine->execlists.queue_priority_hint)
|
|
|
|
return;
|
|
|
|
|
2019-11-04 00:23:05 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
|
2019-10-21 16:02:11 +08:00
|
|
|
/* Nothing currently active? We're overdue for a submission! */
|
|
|
|
inflight = execlists_active(&engine->execlists);
|
|
|
|
if (!inflight)
|
2019-11-04 00:23:05 +08:00
|
|
|
goto unlock;
|
drm/i915/execlists: Suppress preempting self
In order to avoid preempting ourselves, we currently refuse to schedule
the tasklet if we reschedule an inflight context. However, this glosses
over a few issues such as what happens after a CS completion event and
we then preempt the newly executing context with itself, or if something
else causes a tasklet_schedule triggering the same evaluation to
preempt the active context with itself.
However, when we avoid preempting ELSP[0], we still retain the preemption
value as it may match a second preemption request within the same time period
that we need to resolve after the next CS event. However, since we only
store the maximum preemption priority seen, it may not match the
subsequent event and so we should double check whether or not we
actually do need to trigger a preempt-to-idle by comparing the top
priorities from each queue. Later, this gives us a hook for finer
control over deciding whether the preempt-to-idle is justified.
The sequence of events where we end up preempting for no avail is:
1. Queue requests/contexts A, B
2. Priority boost A; no preemption as it is executing, but keep hint
3. After CS switch, B is less than hint, force preempt-to-idle
4. Resubmit B after idling
v2: We can simplify a bunch of tests based on the knowledge that PI will
ensure that earlier requests along the same context will have the highest
priority.
v3: Demonstrate the stale preemption hint with a selftest
References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129185452.20989-4-chris@chris-wilson.co.uk
2019-01-30 02:54:52 +08:00
|
|
|
|
2020-03-10 19:59:47 +08:00
|
|
|
engine->execlists.queue_priority_hint = prio;
|
|
|
|
|
2019-05-07 20:25:44 +08:00
|
|
|
/*
|
|
|
|
* If we are already the currently executing context, don't
|
2019-10-21 16:02:11 +08:00
|
|
|
* bother evaluating if we should preempt ourselves.
|
2019-05-07 20:25:44 +08:00
|
|
|
*/
|
2019-12-20 18:12:29 +08:00
|
|
|
if (inflight->context == rq->context)
|
2019-11-04 00:23:05 +08:00
|
|
|
goto unlock;
|
drm/i915/execlists: Suppress preempting self
In order to avoid preempting ourselves, we currently refuse to schedule
the tasklet if we reschedule an inflight context. However, this glosses
over a few issues such as what happens after a CS completion event and
we then preempt the newly executing context with itself, or if something
else causes a tasklet_schedule triggering the same evaluation to
preempt the active context with itself.
However, when we avoid preempting ELSP[0], we still retain the preemption
value as it may match a second preemption request within the same time period
that we need to resolve after the next CS event. However, since we only
store the maximum preemption priority seen, it may not match the
subsequent event and so we should double check whether or not we
actually do need to trigger a preempt-to-idle by comparing the top
priorities from each queue. Later, this gives us a hook for finer
control over deciding whether the preempt-to-idle is justified.
The sequence of events where we end up preempting for no avail is:
1. Queue requests/contexts A, B
2. Priority boost A; no preemption as it is executing, but keep hint
3. After CS switch, B is less than hint, force preempt-to-idle
4. Resubmit B after idling
v2: We can simplify a bunch of tests based on the knowledge that PI will
ensure that earlier requests along the same context will have the highest
priority.
v3: Demonstrate the stale preemption hint with a selftest
References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129185452.20989-4-chris@chris-wilson.co.uk
2019-01-30 02:54:52 +08:00
|
|
|
|
2019-10-21 16:02:11 +08:00
|
|
|
if (need_preempt(prio, rq_prio(inflight)))
|
|
|
|
tasklet_hi_schedule(&engine->execlists.tasklet);
|
2019-11-04 00:23:05 +08:00
|
|
|
|
|
|
|
unlock:
|
|
|
|
rcu_read_unlock();
|
drm/i915/execlists: Suppress preempting self
In order to avoid preempting ourselves, we currently refuse to schedule
the tasklet if we reschedule an inflight context. However, this glosses
over a few issues such as what happens after a CS completion event and
we then preempt the newly executing context with itself, or if something
else causes a tasklet_schedule triggering the same evaluation to
preempt the active context with itself.
However, when we avoid preempting ELSP[0], we still retain the preemption
value as it may match a second preemption request within the same time period
that we need to resolve after the next CS event. However, since we only
store the maximum preemption priority seen, it may not match the
subsequent event and so we should double check whether or not we
actually do need to trigger a preempt-to-idle by comparing the top
priorities from each queue. Later, this gives us a hook for finer
control over deciding whether the preempt-to-idle is justified.
The sequence of events where we end up preempting for no avail is:
1. Queue requests/contexts A, B
2. Priority boost A; no preemption as it is executing, but keep hint
3. After CS switch, B is less than hint, force preempt-to-idle
4. Resubmit B after idling
v2: We can simplify a bunch of tests based on the knowledge that PI will
ensure that earlier requests along the same context will have the highest
priority.
v3: Demonstrate the stale preemption hint with a selftest
References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129185452.20989-4-chris@chris-wilson.co.uk
2019-01-30 02:54:52 +08:00
|
|
|
}
|
|
|
|
|
2019-05-13 20:01:01 +08:00
|
|
|
static void __i915_schedule(struct i915_sched_node *node,
|
2018-10-01 22:47:55 +08:00
|
|
|
const struct i915_sched_attr *attr)
|
2018-10-01 22:47:54 +08:00
|
|
|
{
|
2020-03-06 15:16:13 +08:00
|
|
|
const int prio = max(attr->priority, node->attr.priority);
|
2019-02-12 04:46:47 +08:00
|
|
|
struct intel_engine_cs *engine;
|
2018-10-01 22:47:54 +08:00
|
|
|
struct i915_dependency *dep, *p;
|
|
|
|
struct i915_dependency stack;
|
2019-02-12 04:46:47 +08:00
|
|
|
struct sched_cache cache;
|
2018-10-01 22:47:54 +08:00
|
|
|
LIST_HEAD(dfs);
|
|
|
|
|
2018-10-01 22:47:55 +08:00
|
|
|
/* Needed in order to use the temporary link inside i915_dependency */
|
|
|
|
lockdep_assert_held(&schedule_lock);
|
2018-10-01 22:47:54 +08:00
|
|
|
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
|
|
|
|
|
2019-05-13 20:01:02 +08:00
|
|
|
if (node_signaled(node))
|
2018-10-01 22:47:54 +08:00
|
|
|
return;
|
|
|
|
|
2019-05-13 20:01:01 +08:00
|
|
|
stack.signaler = node;
|
2018-10-01 22:47:54 +08:00
|
|
|
list_add(&stack.dfs_link, &dfs);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Recursively bump all dependent priorities to match the new request.
|
|
|
|
*
|
|
|
|
* A naive approach would be to use recursion:
|
|
|
|
* static void update_priorities(struct i915_sched_node *node, prio) {
|
|
|
|
* list_for_each_entry(dep, &node->signalers_list, signal_link)
|
|
|
|
* update_priorities(dep->signal, prio)
|
|
|
|
* queue_request(node);
|
|
|
|
* }
|
|
|
|
* but that may have unlimited recursion depth and so runs a very
|
|
|
|
* real risk of overunning the kernel stack. Instead, we build
|
|
|
|
* a flat list of all dependencies starting with the current request.
|
|
|
|
* As we walk the list of dependencies, we add all of its dependencies
|
|
|
|
* to the end of the list (this may include an already visited
|
|
|
|
* request) and continue to walk onwards onto the new dependencies. The
|
|
|
|
* end result is a topological list of requests in reverse order, the
|
|
|
|
* last element in the list is the request we must execute first.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(dep, &dfs, dfs_link) {
|
|
|
|
struct i915_sched_node *node = dep->signaler;
|
|
|
|
|
2019-02-26 18:23:54 +08:00
|
|
|
/* If we are already flying, we know we have no signalers */
|
|
|
|
if (node_started(node))
|
|
|
|
continue;
|
|
|
|
|
2018-10-01 22:47:54 +08:00
|
|
|
/*
|
|
|
|
* Within an engine, there can be no cycle, but we may
|
|
|
|
* refer to the same dependency chain multiple times
|
|
|
|
* (redundant dependencies are not eliminated) and across
|
|
|
|
* engines.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(p, &node->signalers_list, signal_link) {
|
|
|
|
GEM_BUG_ON(p == dep); /* no cycles! */
|
|
|
|
|
|
|
|
if (node_signaled(p->signaler))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (prio > READ_ONCE(p->signaler->attr.priority))
|
|
|
|
list_move_tail(&p->dfs_link, &dfs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we didn't need to bump any existing priorities, and we haven't
|
|
|
|
* yet submitted this request (i.e. there is no potential race with
|
|
|
|
* execlists_submit_request()), we can set our own priority and skip
|
|
|
|
* acquiring the engine locks.
|
|
|
|
*/
|
2019-05-13 20:01:01 +08:00
|
|
|
if (node->attr.priority == I915_PRIORITY_INVALID) {
|
|
|
|
GEM_BUG_ON(!list_empty(&node->link));
|
|
|
|
node->attr = *attr;
|
2018-10-01 22:47:54 +08:00
|
|
|
|
|
|
|
if (stack.dfs_link.next == stack.dfs_link.prev)
|
2018-10-01 22:47:55 +08:00
|
|
|
return;
|
2018-10-01 22:47:54 +08:00
|
|
|
|
|
|
|
__list_del_entry(&stack.dfs_link);
|
|
|
|
}
|
|
|
|
|
2019-02-12 04:46:47 +08:00
|
|
|
memset(&cache, 0, sizeof(cache));
|
2019-05-13 20:01:01 +08:00
|
|
|
engine = node_to_request(node)->engine;
|
2019-06-15 00:46:06 +08:00
|
|
|
spin_lock(&engine->active.lock);
|
2018-10-01 22:47:54 +08:00
|
|
|
|
|
|
|
/* Fifo and depth-first replacement ensure our deps execute before us */
|
drm/i915: Load balancing across a virtual engine
Having allowed the user to define a set of engines that they will want
to only use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner as best to
distribute load. The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so, wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
A couple of areas for potential improvement left!
- The virtual engine always take priority over equal-priority tasks.
Mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).
- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.
- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.
Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.
sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).
v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is an virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)
Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
2019-05-22 05:11:30 +08:00
|
|
|
engine = sched_lock_engine(node, engine, &cache);
|
2018-10-01 22:47:54 +08:00
|
|
|
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
|
|
|
|
INIT_LIST_HEAD(&dep->dfs_link);
|
|
|
|
|
2019-05-13 20:01:01 +08:00
|
|
|
node = dep->signaler;
|
2019-02-12 04:46:47 +08:00
|
|
|
engine = sched_lock_engine(node, engine, &cache);
|
2019-06-15 00:46:06 +08:00
|
|
|
lockdep_assert_held(&engine->active.lock);
|
2018-10-01 22:47:54 +08:00
|
|
|
|
|
|
|
/* Recheck after acquiring the engine->timeline.lock */
|
|
|
|
if (prio <= node->attr.priority || node_signaled(node))
|
|
|
|
continue;
|
|
|
|
|
drm/i915: Load balancing across a virtual engine
Having allowed the user to define a set of engines that they will want
to only use, we go one step further and allow them to bind those engines
into a single virtual instance. Submitting a batch to the virtual engine
will then forward it to any one of the set in a manner as best to
distribute load. The virtual engine has a single timeline across all
engines (it operates as a single queue), so it is not able to concurrently
run batches across multiple engines by itself; that is left up to the user
to submit multiple concurrent batches to multiple queues. Multiple users
will be load balanced across the system.
The mechanism used for load balancing in this patch is a late greedy
balancer. When a request is ready for execution, it is added to each
engine's queue, and when an engine is ready for its next request it
claims it from the virtual engine. The first engine to do so, wins, i.e.
the request is executed at the earliest opportunity (idle moment) in the
system.
As not all HW is created equal, the user is still able to skip the
virtual engine and execute the batch on a specific engine, all within the
same queue. It will then be executed in order on the correct engine,
with execution on other virtual engines being moved away due to the load
detection.
A couple of areas for potential improvement left!
- The virtual engine always take priority over equal-priority tasks.
Mostly broken up by applying FQ_CODEL rules for prioritising new clients,
and hopefully the virtual and real engines are not then congested (i.e.
all work is via virtual engines, or all work is to the real engine).
- We require the breadcrumb irq around every virtual engine request. For
normal engines, we eliminate the need for the slow round trip via
interrupt by using the submit fence and queueing in order. For virtual
engines, we have to allow any job to transfer to a new ring, and cannot
coalesce the submissions, so require the completion fence instead,
forcing the persistent use of interrupts.
- We only drip feed single requests through each virtual engine and onto
the physical engines, even if there was enough work to fill all ELSP,
leaving small stalls with an idle CS event at the end of every request.
Could we be greedy and fill both slots? Being lazy is virtuous for load
distribution on less-than-full workloads though.
Other areas of improvement are more general, such as reducing lock
contention, reducing dispatch overhead, looking at direct submission
rather than bouncing around tasklets etc.
sseu: Lift the restriction to allow sseu to be reconfigured on virtual
engines composed of RENDER_CLASS (rcs).
v2: macroize check_user_mbz()
v3: Cancel virtual engines on wedging
v4: Commence commenting
v5: Replace 64b sibling_mask with a list of class:instance
v6: Drop the one-element array in the uabi
v7: Assert it is an virtual engine in to_virtual_engine()
v8: Skip over holes in [class][inst] so we can selftest with (vcs0, vcs2)
Link: https://github.com/intel/media-driver/pull/283
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-6-chris@chris-wilson.co.uk
2019-05-22 05:11:30 +08:00
|
|
|
GEM_BUG_ON(node_to_request(node)->engine != engine);
|
|
|
|
|
2020-03-09 19:09:34 +08:00
|
|
|
WRITE_ONCE(node->attr.priority, prio);
|
2019-06-15 00:46:06 +08:00
|
|
|
|
2020-01-17 02:47:52 +08:00
|
|
|
/*
|
|
|
|
* Once the request is ready, it will be placed into the
|
|
|
|
* priority lists and then onto the HW runlist. Before the
|
|
|
|
* request is ready, it does not contribute to our preemption
|
|
|
|
* decisions and we can safely ignore it, as it will, and
|
|
|
|
* any preemption required, be dealt with upon submission.
|
|
|
|
* See engine->submit_request()
|
|
|
|
*/
|
|
|
|
if (list_empty(&node->link))
|
2019-06-15 00:46:06 +08:00
|
|
|
continue;
|
|
|
|
|
2020-01-17 02:47:52 +08:00
|
|
|
if (i915_request_in_priority_queue(node_to_request(node))) {
|
2019-06-15 00:46:06 +08:00
|
|
|
if (!cache.priolist)
|
|
|
|
cache.priolist =
|
|
|
|
i915_sched_lookup_priolist(engine,
|
|
|
|
prio);
|
|
|
|
list_move_tail(&node->link, cache.priolist);
|
2018-10-01 22:47:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Defer (tasklet) submission until after all of our updates. */
|
2019-10-21 16:02:11 +08:00
|
|
|
kick_submission(engine, node_to_request(node), prio);
|
2018-10-01 22:47:54 +08:00
|
|
|
}
|
|
|
|
|
2019-06-15 00:46:06 +08:00
|
|
|
spin_unlock(&engine->active.lock);
|
2018-10-01 22:47:55 +08:00
|
|
|
}
|
2018-10-01 22:47:54 +08:00
|
|
|
|
2018-10-01 22:47:55 +08:00
|
|
|
/*
 * i915_schedule - apply new scheduling attributes to a request
 * @rq: the request to (re)prioritise
 * @attr: the scheduling attributes (priority) to apply
 *
 * Wraps the priority-inheritance walk of __i915_schedule() with the
 * global schedule_lock. Interrupts are disabled outright (spin_lock_irq)
 * as the lock is also taken from hardirq-capable paths elsewhere (see
 * i915_schedule_bump_priority() using irqsave).
 */
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}
|
2018-10-01 22:47:55 +08:00
|
|
|
|
2019-05-13 20:01:01 +08:00
|
|
|
/*
 * Merge additional priority bits into @node's current priority and
 * reschedule it. Caller must hold schedule_lock (enforced by the
 * lockdep_assert_held() inside __i915_schedule()).
 */
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
	/* Work on a copy; __i915_schedule() applies it under its own rules. */
	struct i915_sched_attr attr = node->attr;

	/* Already carrying these bump bits; nothing to do. */
	if (attr.priority & bump)
		return;

	attr.priority |= bump;
	__i915_schedule(node, &attr);
}
|
|
|
|
|
2018-10-01 22:47:55 +08:00
|
|
|
/*
 * i915_schedule_bump_priority - OR priority flag bits into a request
 * @rq: the request to boost
 * @bump: internal priority bits to set (must lie within I915_PRIORITY_MASK)
 *
 * Used to give a small boost (e.g. to semaphore users) without a full
 * attribute change.
 */
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	unsigned long flags;

	/* Only the internal (sub-user-priority) flag bits may be bumped. */
	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	/*
	 * Unlocked early-out to avoid taking the global lock when the bits
	 * are already set; __bump_priority() rechecks under the lock.
	 */
	if (READ_ONCE(rq->sched.attr.priority) & bump)
		return;

	/* irqsave: this path may be reached with interrupts disabled. */
	spin_lock_irqsave(&schedule_lock, flags);
	__bump_priority(&rq->sched, bump);
	spin_unlock_irqrestore(&schedule_lock, flags);
}
|
2019-02-28 18:20:33 +08:00
|
|
|
|
2019-05-13 20:01:00 +08:00
|
|
|
/*
 * i915_sched_node_init - one-time construction of a scheduling node
 * @node: the node embedded in a request
 *
 * Initialises the list heads exactly once, then delegates the
 * per-use reset to i915_sched_node_reinit() so that recycled
 * (SLAB_TYPESAFE_BY_RCU) requests share the same reset path.
 */
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}
|
|
|
|
|
|
|
|
/*
 * i915_sched_node_reinit - reset a node's scalar state for reuse
 * @node: the node being recycled
 *
 * Only the scalar fields are reset; the list heads are preserved from
 * i915_sched_node_init() and must already be empty (all dependencies
 * retired) by the time the node is reused.
 */
void i915_sched_node_reinit(struct i915_sched_node *node)
{
	/* Priority is assigned on first schedule; mark as not-yet-set. */
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	/* The dependency lists must have been drained before recycling. */
	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}
|
|
|
|
|
|
|
|
/*
 * Allocate a dependency edge from the dedicated slab cache.
 * GFP_KERNEL: may sleep, so only callable from process context.
 */
static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
|
|
|
|
|
|
|
|
/* Return a dependency edge to the slab cache. */
static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}
|
|
|
|
|
|
|
|
/*
 * __i915_sched_node_add_dependency - link @node to wait upon @signal
 * @node: the waiter
 * @signal: the signaler @node depends upon
 * @dep: caller-provided dependency edge (ownership passes to the lists
 *       on success; caller must free it on a false return if it was
 *       allocated, see i915_sched_node_add_dependency())
 * @flags: I915_DEPENDENCY_* flags describing the edge
 *
 * Returns true if the edge was installed, false if @signal has already
 * completed and no edge is required.
 */
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	/* A completed signaler needs no edge; report it unused. */
	if (!node_signaled(signal)) {
		/*
		 * Fully initialise the edge BEFORE publishing it via
		 * list_add_rcu() below — lockless walkers may observe it
		 * as soon as it is on a list.
		 */
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

		/* Keep track of whether anyone on this chain has a semaphore */
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
		    !node_started(signal))
			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

		/* All set, now publish. Beware the lockless walkers. */
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

		/*
		 * As we do not allow WAIT to preempt inflight requests,
		 * once we have executed a request, along with triggering
		 * any execution callbacks, we must preserve its ordering
		 * within the non-preemptible FIFO.
		 */
		BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
		if (flags & I915_DEPENDENCY_EXTERNAL)
			__bump_priority(signal, __NO_PREEMPTION);

		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}
|
|
|
|
|
|
|
|
int i915_sched_node_add_dependency(struct i915_sched_node *node,
|
|
|
|
struct i915_sched_node *signal)
|
|
|
|
{
|
|
|
|
struct i915_dependency *dep;
|
|
|
|
|
|
|
|
dep = i915_dependency_alloc();
|
|
|
|
if (!dep)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (!__i915_sched_node_add_dependency(node, signal, dep,
|
2019-05-15 21:00:50 +08:00
|
|
|
I915_DEPENDENCY_EXTERNAL |
|
2019-05-13 20:01:00 +08:00
|
|
|
I915_DEPENDENCY_ALLOC))
|
|
|
|
i915_dependency_free(dep);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Tear down a scheduler node, unlinking it from both sides of the
 * dependency graph under the global schedule_lock.
 *
 * NOTE(review): deps are freed immediately after list_del_rcu() with no
 * grace period — presumably safe because the dependency slab is created
 * with SLAB_TYPESAFE_BY_RCU (see i915_global_scheduler_init), so lockless
 * walkers may observe stale-but-typed memory; confirm against the walkers.
 */
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		/* RCU deletion: lockless walkers may still be traversing */
		list_del_rcu(&dep->wait_link);
		/* Only free links we allocated (external deps), not embedded ones */
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	/* Reset the list head so the node reads as empty to later users */
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		/* RCU deletion: lockless walkers may still be traversing */
		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}
|
|
|
|
|
2019-03-06 05:38:30 +08:00
|
|
|
static void i915_global_scheduler_shrink(void)
|
|
|
|
{
|
|
|
|
kmem_cache_shrink(global.slab_dependencies);
|
|
|
|
kmem_cache_shrink(global.slab_priorities);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void i915_global_scheduler_exit(void)
|
|
|
|
{
|
|
|
|
kmem_cache_destroy(global.slab_dependencies);
|
|
|
|
kmem_cache_destroy(global.slab_priorities);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Completes the tentative definition of 'global' from the top of the file,
 * wiring the shrink/exit hooks into the embedded i915_global base that is
 * handed to i915_global_register() during init.
 */
static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };
|
|
|
|
|
2019-02-28 18:20:33 +08:00
|
|
|
/*
 * Create the scheduler slab caches and register them with the
 * i915_globals framework so they are shrunk/destroyed centrally.
 *
 * The dependency cache is SLAB_TYPESAFE_BY_RCU: objects may be freed and
 * reused while lockless RCU walkers still hold references, so the memory
 * must remain typed across a grace period.
 *
 * Returns 0 on success, -ENOMEM if either cache cannot be created.
 *
 * Fix: the error path previously destroyed slab_priorities — the cache
 * that had just failed to allocate (a NULL no-op) — leaking
 * slab_dependencies. Clean up the cache that was actually created.
 */
int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist,
					    SLAB_HWCACHE_ALIGN);
	if (!global.slab_priorities)
		goto err_dependencies;

	i915_global_register(&global.base);
	return 0;

err_dependencies:
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}
|