/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
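
/*
 * Look up the engine registered for a uAPI (class, instance) pair. The
 * uabi_engines rb-tree is keyed by uabi_class first and uabi_instance
 * second, matching the sort order established below by engine_cmp(), so a
 * single descent resolves the pair userspace names in its uAPI.
 */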
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}
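
/*
 * Engines are staged on a lockless llist as the driver probes them. Note
 * that the llist_node deliberately aliases the storage of the rb_node (and
 * the llist_head that of the rb_root); this is safe only because the llist
 * is drained and rebuilt as an rb-tree in intel_engines_driver_register()
 * before any lookup can occur.
 */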
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add((struct llist_node *)&engine->uabi_node,
		  (struct llist_head *)&engine->i915->uabi_engines);
}
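
/* Map the hardware engine class onto the class reported through the uAPI. */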
static const u8 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};
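
/*
 * Sort by uAPI class first and instance second, the same order walked by
 * intel_engine_lookup_user(). The priv argument required by list_sort() is
 * unused.
 */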
static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of((struct rb_node *)A, typeof(*a), uabi_node);
	const struct intel_engine_cs *b =
		container_of((struct rb_node *)B, typeof(*b), uabi_node);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}
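
/*
 * Atomically claim every engine staged by intel_engine_add_user(), then
 * splice them onto a regular list so they can be sorted for registration.
 */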
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all((struct llist_head *)&i915->uabi_engines);
}

static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)pos, typeof(*engine),
				     uabi_node);
		list_add((struct list_head *)&engine->uabi_node, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}
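
/*
 * Compute the scheduler capabilities exposed to userspace. A capability is
 * advertised only if every engine agrees: bits enabled on one engine but
 * disabled on another are masked out by enabled & ~disabled below.
 */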
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
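
/* Human-readable class prefix used to build the user-facing engine name. */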
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}
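
/*
 * Running state for assigning each engine its slot in the legacy execbuf
 * ring mapping: the instance counter restarts whenever the walk crosses
 * into a new gt or a new engine class.
 */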
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}
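
/*
 * Engines arrive here in sorted order, so consecutive engines of the same
 * gt/class receive consecutive legacy indices; anything beyond the legacy
 * limits is marked INVALID_ENGINE and does not advance the counter.
 */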
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}
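
/*
 * Publish the probed engines to userspace: drain and sort the staging
 * llist, assign each engine its uAPI class/instance and final name, and
 * insert it into the uabi_engines rb-tree consumed by
 * intel_engine_lookup_user().
 */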
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	struct legacy_ring ring = {};
	u8 uabi_instances[4] = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of((struct rb_node *)it, typeof(*engine),
				     uabi_node);
		char old[sizeof(engine->name)];

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];

		GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
		engine->uabi_instance = uabi_instances[engine->uabi_class]++;

		/* Replace the internal name with the final user facing name */
		memcpy(old, engine->name, sizeof(engine->name));
		scnprintf(engine->name, sizeof(engine->name), "%s%u",
			  intel_engine_class_repr(engine->class),
			  engine->uabi_instance);
		DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}
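
	/*
	 * For debug builds, cross-check the tree: every (class, instance)
	 * pair handed out above must be found again by the rb-tree walk,
	 * and multi-instance classes must agree on their default context
	 * state.
	 */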
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
			for (inst = 0; inst < uabi_instances[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected =
				engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
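
/*
 * Return a mask of uAPI engine classes for which at least one engine has a
 * recorded default context state (engine->default_state); the debug
 * cross-check above requires all engines of a class to agree.
 */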
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}