/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_H__
#define __I915_GEM_H__

#include <linux/bug.h>
#include <linux/interrupt.h>

#include <drm/drm_drv.h>

struct drm_i915_private;
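
/*
 * GEM debug asserts: with CONFIG_DRM_I915_DEBUG_GEM enabled, GEM_BUG_ON()
 * reports the failing condition via GEM_TRACE_ERR() and halts with BUG(),
 * while GEM_WARN_ON() is a plain WARN_ON(). Without it, GEM_BUG_ON()
 * degrades to BUILD_BUG_ON_INVALID() (the expression is type-checked but
 * never evaluated at runtime), while GEM_WARN_ON() still evaluates and
 * returns the condition but stays silent. A typical, purely illustrative
 * call site asserts a side-effect-free invariant:
 *
 *	GEM_BUG_ON(!i915_request_completed(rq));
 */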
#ifdef CONFIG_DRM_I915_DEBUG_GEM

#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)

#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
			      __func__, __LINE__, __stringify(condition)); \
		BUG(); \
		} \
	} while (0)
#define GEM_WARN_ON(expr) WARN_ON(expr)

#define GEM_DEBUG_DECL(var) var
#define GEM_DEBUG_EXEC(expr) expr
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)

#else

#define GEM_SHOW_DEBUG() (0)

#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })

#define GEM_DEBUG_DECL(var)
#define GEM_DEBUG_EXEC(expr) do { } while (0)
#define GEM_DEBUG_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })

#endif
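
/*
 * GEM tracing: with CONFIG_DRM_I915_TRACE_GEM enabled, GEM_TRACE() feeds
 * printf-style messages into the ftrace ring buffer via trace_printk(),
 * GEM_TRACE_ERR() additionally mirrors the message to the kernel log via
 * pr_err(), and GEM_TRACE_DUMP()/GEM_TRACE_DUMP_ON() dump the accumulated
 * trace buffer. All of them compile out when the option is disabled.
 */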
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#define GEM_TRACE_ERR(...) do { \
	pr_err(__VA_ARGS__); \
	trace_printk(__VA_ARGS__); \
} while (0)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
#define GEM_TRACE_DUMP_ON(expr) \
	do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_ERR(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
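
/*
 * Timeout, in jiffies (HZ / 5 == 200ms), used by callers waiting for the
 * GPU to go idle before giving up.
 */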
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
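
/*
 * Thin wrappers around the core tasklet implementation: i915 drives its
 * submission tasklets directly and so needs to lock a tasklet against
 * concurrent execution, and to inspect or adjust its internal count/state
 * without the unconditional synchronisation of tasklet_disable().
 */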
static inline void tasklet_lock(struct tasklet_struct *t)
{
	while (!tasklet_trylock(t))
		cpu_relax();
}

static inline bool tasklet_is_locked(const struct tasklet_struct *t)
{
	return test_bit(TASKLET_STATE_RUN, &t->state);
}
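
/*
 * Disable the tasklet; only the first, enabled -> disabled, transition
 * waits for a concurrently running instance to complete. Nested disables
 * merely bump the disable count.
 */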
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
	if (!atomic_fetch_inc(&t->count))
		tasklet_unlock_wait(t);
}

static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
{
	return !atomic_read(&t->count);
}
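
/*
 * Drop one disable reference; returns true on the final enable, i.e. when
 * the tasklet may run again. Callers typically use that to kick the
 * tasklet in case work arrived while it was disabled, e.g. (illustrative):
 *
 *	if (__tasklet_enable(&execlists->tasklet))
 *		tasklet_hi_schedule(&execlists->tasklet);
 */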
static inline bool __tasklet_enable(struct tasklet_struct *t)
{
	return atomic_dec_and_test(&t->count);
}

static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
{
	return test_bit(TASKLET_STATE_SCHED, &t->state);
}
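
/*
 * Timer helpers: i915 treats timer->expires == 0 as "not in use".
 * cancel_timer() stops a timer and clears that mark; timer_expired()
 * reports a timer that was armed (e.g. via mod_timer()) and has since
 * fired without being re-armed.
 */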
static inline void cancel_timer(struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return;

	del_timer(t);
	WRITE_ONCE(t->expires, 0);
}

static inline bool timer_expired(const struct timer_list *t)
{
	return READ_ONCE(t->expires) && !timer_pending(t);
}

#endif /* __I915_GEM_H__ */