/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

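/*
 * Static table of per-engine constants: the userspace execbuffer ID, the
 * hardware (and GuC) engine ID, MMIO base, GEN8 interrupt shift, and the
 * legacy ring-buffer vs execlists initialisation entry points.
 */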
static const struct engine_info {
        const char *name;
        unsigned exec_id;
        enum intel_engine_hw_id hw_id;
        u32 mmio_base;
        unsigned irq_shift;
        int (*init_legacy)(struct intel_engine_cs *engine);
        int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
        [RCS] = {
                .name = "render ring",
                .exec_id = I915_EXEC_RENDER,
                .hw_id = RCS_HW,
                .mmio_base = RENDER_RING_BASE,
                .irq_shift = GEN8_RCS_IRQ_SHIFT,
                .init_execlists = logical_render_ring_init,
                .init_legacy = intel_init_render_ring_buffer,
        },
        [BCS] = {
                .name = "blitter ring",
                .exec_id = I915_EXEC_BLT,
                .hw_id = BCS_HW,
                .mmio_base = BLT_RING_BASE,
                .irq_shift = GEN8_BCS_IRQ_SHIFT,
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_blt_ring_buffer,
        },
        [VCS] = {
                .name = "bsd ring",
                .exec_id = I915_EXEC_BSD,
                .hw_id = VCS_HW,
                .mmio_base = GEN6_BSD_RING_BASE,
                .irq_shift = GEN8_VCS1_IRQ_SHIFT,
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_bsd_ring_buffer,
        },
        [VCS2] = {
                .name = "bsd2 ring",
                .exec_id = I915_EXEC_BSD,
                .hw_id = VCS2_HW,
                .mmio_base = GEN8_BSD2_RING_BASE,
                .irq_shift = GEN8_VCS2_IRQ_SHIFT,
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_bsd2_ring_buffer,
        },
        [VECS] = {
                .name = "video enhancement ring",
                .exec_id = I915_EXEC_VEBOX,
                .hw_id = VECS_HW,
                .mmio_base = VEBOX_RING_BASE,
                .irq_shift = GEN8_VECS_IRQ_SHIFT,
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_vebox_ring_buffer,
        },
};

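/*
 * Allocate a single intel_engine_cs, fill in the constants from the
 * intel_engines[] table above and publish it in dev_priv->engine[id].
 * Engines that are absent on a platform simply keep a NULL slot.
 */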
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
                   enum intel_engine_id id)
{
        const struct engine_info *info = &intel_engines[id];
        struct intel_engine_cs *engine;

        GEM_BUG_ON(dev_priv->engine[id]);

        engine = kzalloc(sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return -ENOMEM;

        engine->id = id;
        engine->i915 = dev_priv;
        engine->name = info->name;
        engine->exec_id = info->exec_id;
        engine->hw_id = engine->guc_id = info->hw_id;
        engine->mmio_base = info->mmio_base;
        engine->irq_shift = info->irq_shift;

        /* Nothing to do here, execute in order of dependencies */
        engine->schedule = NULL;

        dev_priv->engine[id] = engine;
        return 0;
}

/**
 * intel_engines_init_early() - allocate the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
        struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
        unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
        unsigned int mask = 0;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int i;
        int err;

        WARN_ON(ring_mask == 0);
        WARN_ON(ring_mask &
                GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

        for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
                if (!HAS_ENGINE(dev_priv, i))
                        continue;

                err = intel_engine_setup(dev_priv, i);
                if (err)
                        goto cleanup;

                mask |= ENGINE_MASK(i);
        }

        /*
         * Catch failures to update intel_engines table when the new engines
         * are added to the driver by a warning and disabling the forgotten
         * engines.
         */
        if (WARN_ON(mask != ring_mask))
                device_info->ring_mask = mask;

        device_info->num_rings = hweight32(mask);

        return 0;

cleanup:
        for_each_engine(engine, dev_priv, id)
                kfree(engine);
        return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
        struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
        struct intel_engine_cs *engine;
        enum intel_engine_id id, err_id;
        unsigned int mask = 0;
        int err = 0;

        for_each_engine(engine, dev_priv, id) {
                int (*init)(struct intel_engine_cs *engine);

                if (i915.enable_execlists)
                        init = intel_engines[id].init_execlists;
                else
                        init = intel_engines[id].init_legacy;
                if (!init) {
                        kfree(engine);
                        dev_priv->engine[id] = NULL;
                        continue;
                }

                err = init(engine);
                if (err) {
                        err_id = id;
                        goto cleanup;
                }

                mask |= ENGINE_MASK(id);
        }

        /*
         * Catch failures to update intel_engines table when the new engines
         * are added to the driver by a warning and disabling the forgotten
         * engines.
         */
        if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
                device_info->ring_mask = mask;

        device_info->num_rings = hweight32(mask);

        return 0;

cleanup:
        for_each_engine(engine, dev_priv, id) {
                if (id >= err_id)
                        kfree(engine);
                else
                        dev_priv->gt.cleanup_engine(engine);
        }
        return err;
}

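/*
 * Reset the engine's notion of the current seqno: clear the GEN6/7 semaphore
 * sync registers (or the GEN8+ semaphore page), rewrite the hardware status
 * page and wake any waiters so they re-sample the new value.
 */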
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
        struct drm_i915_private *dev_priv = engine->i915;

        /* Our semaphore implementation is strictly monotonic (i.e. we proceed
         * so long as the semaphore value in the register/page is greater
         * than the sync value), so whenever we reset the seqno, as long as
         * we also reset the tracking semaphore value to 0, it will always be
         * before the next request's seqno. If we don't reset the semaphore
         * value, then when the seqno moves backwards all future waits will
         * complete instantly (causing rendering corruption).
         */
        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                if (HAS_VEBOX(dev_priv))
                        I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
        }
        if (dev_priv->semaphore) {
                struct page *page = i915_vma_first_page(dev_priv->semaphore);
                void *semaphores;

                /* Semaphores are in noncoherent memory, flush to be safe */
                semaphores = kmap(page);
                memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
                drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                                       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
                kunmap(page);
        }

        intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);

        GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
        engine->timeline->last_submitted_seqno = seqno;

        engine->hangcheck.seqno = seqno;

        /* After manually advancing the seqno, fake the interrupt in case
         * there are any waiters for that seqno.
         */
        intel_engine_wakeup(engine);
}

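/* Point the engine at its slot in the single global timeline. */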
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
        engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
        engine->execlist_queue = RB_ROOT;
        engine->execlist_first = NULL;

        intel_engine_init_timeline(engine);
        intel_engine_init_hangcheck(engine);
        i915_gem_batch_pool_init(engine, &engine->batch_pool);

        intel_engine_init_cmd_parser(engine);
}

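/*
 * Allocate and pin the per-engine scratch page (used, for example, as the
 * pipe-control scratch area), preferring stolen memory with a fallback to
 * an internal object, mapped high in the global GTT.
 */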
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int ret;

        WARN_ON(engine->scratch);

        obj = i915_gem_object_create_stolen(engine->i915, size);
        if (!obj)
                obj = i915_gem_object_create_internal(engine->i915, size);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate scratch page\n");
                return PTR_ERR(obj);
        }

        vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }

        ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
        if (ret)
                goto err_unref;

        engine->scratch = vma;
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         engine->name, i915_ggtt_offset(vma));
        return 0;

err_unref:
        i915_gem_object_put(obj);
        return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
        i915_vma_unpin_and_release(&engine->scratch);
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
        int ret;

        /* We may need to do things with the shrinker which
         * require us to immediately switch back to the default
         * context. This can cause a problem as pinning the
         * default context also requires GTT space which may not
         * be available. To avoid this we always pin the default
         * context.
         */
        ret = engine->context_pin(engine, engine->i915->kernel_context);
        if (ret)
                return ret;

        ret = intel_engine_init_breadcrumbs(engine);
        if (ret)
                goto err_unpin;

        ret = i915_gem_render_state_init(engine);
        if (ret)
                goto err_unpin;

        return 0;

err_unpin:
        engine->context_unpin(engine, engine->i915->kernel_context);
        return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
        intel_engine_cleanup_scratch(engine);

        i915_gem_render_state_fini(engine);
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);

        engine->context_unpin(engine, engine->i915->kernel_context);
}

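/* Read the engine's current ACTHD (active head) pointer. */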
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;

        if (INTEL_GEN(dev_priv) >= 8)
                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
                                         RING_ACTHD_UDW(engine->mmio_base));
        else if (INTEL_GEN(dev_priv) >= 4)
                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);

        return acthd;
}

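/*
 * Read the engine's BBADDR register, i.e. the head of the last batch buffer
 * executed on this engine.
 */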
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u64 bbaddr;

        if (INTEL_GEN(dev_priv) >= 8)
                bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
                                          RING_BBADDR_UDW(engine->mmio_base));
        else
                bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

        return bbaddr;
}

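/* Return a human-readable name for an i915 cache level. */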
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
        switch (type) {
        case I915_CACHE_NONE: return " uncached";
        case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
        case I915_CACHE_L3_LLC: return " L3+LLC";
        case I915_CACHE_WT: return " WT";
        default: return "";
        }
}

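/*
 * Read a register that is replicated per slice/subslice: steer
 * GEN8_MCR_SELECTOR to the requested slice/subslice under forcewake and the
 * uncore lock, read the register, then restore the selector to 0.
 */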
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                  int subslice, i915_reg_t reg)
{
        uint32_t mcr;
        uint32_t ret;
        enum forcewake_domains fw_domains;

        fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                                    FW_REG_READ);
        fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
                                                     GEN8_MCR_SELECTOR,
                                                     FW_REG_READ | FW_REG_WRITE);

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

        mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
        /*
         * The HW expects the slice and subslice selectors to be reset to 0
         * after reading out the registers.
         */
        WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
        mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
        mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        ret = I915_READ_FW(reg);

        mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return ret;
}

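/*
 * Capture the INSTDONE debug registers for the engine into @instdone. The
 * default case below covers GEN8+, where the render engine also exposes
 * per-slice/subslice sampler and row registers.
 */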
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u32 mmio_base = engine->mmio_base;
        int slice;
        int subslice;

        memset(instdone, 0, sizeof(*instdone));

        switch (INTEL_GEN(dev_priv)) {
        default:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id != RCS)
                        break;

                instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
                for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
                        instdone->sampler[slice][subslice] =
                                read_subslice_reg(dev_priv, slice, subslice,
                                                  GEN7_SAMPLER_INSTDONE);
                        instdone->row[slice][subslice] =
                                read_subslice_reg(dev_priv, slice, subslice,
                                                  GEN7_ROW_INSTDONE);
                }
                break;
        case 7:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id != RCS)
                        break;

                instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
                instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
                instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

                break;
        case 6:
        case 5:
        case 4:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id == RCS)
                        /* HACK: Using the wrong struct member */
                        instdone->slice_common = I915_READ(GEN4_INSTDONE1);
                break;
        case 3:
        case 2:
                instdone->instdone = I915_READ(GEN2_INSTDONE);
                break;
        }
}

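/* When the selftests are enabled, pull in the mock engine they use. */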
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif