/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

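/*
 * set_hwstam() programs the engine's hardware status mask (HWSTAM) via
 * intel_engine_set_hwsp_writemask(); the render user interrupt is
 * deliberately left unmasked to paper over interrupts lost across a reset.
 */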
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

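/*
 * set_hws_pga() points HWS_PGA at a physical page; used on platforms whose
 * status page must live in physical memory rather than the GGTT.
 */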
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

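/*
 * set_hwsp() writes the GGTT offset of the status page into the per-engine
 * HWS_PGA register; the register location moved around between generations.
 */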
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(engine->i915, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(engine->i915, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write(engine->uncore, hwsp, offset);
	intel_uncore_posting_read(engine->uncore, hwsp);
}

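/*
 * flush_cs_tlb() forces a command streamer TLB invalidation by issuing a
 * sync flush through RING_INSTPM and polling for its completion; the flush
 * is only applied on gen6/gen7.
 */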
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	drm_WARN_ON(&dev_priv->drm,
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		drm_err(&dev_priv->drm,
			"%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			engine->name);
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

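/*
 * stop_ring() asks the command streamer to stop (STOP_RING), waits for it
 * to report idle, then clears HEAD/TAIL and disables the ring by writing 0
 * to RING_CTL. Returns true if the ring head ends up parked at zero.
 */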
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(engine->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			drm_err(&dev_priv->drm,
				"%s : timed out trying to stop ring\n",
				engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (ENGINE_READ(engine, RING_HEAD) !=
			    ENGINE_READ(engine, RING_TAIL))
				return false;
		}
	}

	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));

	ENGINE_WRITE(engine, RING_HEAD, 0);
	ENGINE_WRITE(engine, RING_TAIL, 0);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE(engine, RING_CTL, 0);

	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

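/*
 * When handed the GGTT, vm_alias() returns the aliasing ppgtt that backs
 * it; full ppgtt address spaces are passed through unchanged.
 */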
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

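/*
 * set_pp_dir() programs RING_PP_DIR_DCLV/RING_PP_DIR_BASE directly via
 * mmio, as used from xcs_resume(); the same registers are loaded from
 * within a request by load_pd_dir() further below.
 */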
static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (vm) {
		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
		ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
	}
}

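/*
 * xcs_resume() reinitialises a legacy ringbuffer engine after reset or
 * suspend: stop the ring, reprogram the status page, restore the ppgtt
 * registers and the ring HEAD/TAIL, then re-enable the ring (RING_VALID).
 */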
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->legacy.ring;
	int ret = 0;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		drm_dbg(&dev_priv->drm, "%s head not reset to zero "
			"ctl %08x head %08x tail %08x start %08x\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_HEAD),
			ENGINE_READ(engine, RING_TAIL),
			ENGINE_READ(engine, RING_START));

		if (!stop_ring(engine)) {
			drm_err(&dev_priv->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ(engine, RING_CTL),
				ENGINE_READ(engine, RING_HEAD),
				ENGINE_READ(engine, RING_TAIL),
				ENGINE_READ(engine, RING_START));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		drm_err(&dev_priv->drm, "%s initialization failed "
			"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_CTL) & RING_VALID,
			ENGINE_READ(engine, RING_HEAD), ring->head,
			ENGINE_READ(engine, RING_TAIL), ring->tail,
			ENGINE_READ(engine, RING_START),
			i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	return ret;
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also as modern gpu as kbl can suffer
	 * from system hang if batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");

	if (intel_engine_stop_cs(engine))
		ENGINE_TRACE(engine, "timed out on STOP_RING\n");

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(pos, &engine->active.requests, sched.link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

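/*
 * reset_cancel() flushes out every submitted request with -EIO (e.g. when
 * the GT is declared wedged) and signals the breadcrumbs so that waiters
 * are released; not-yet-ready requests are nop'ed when later submitted.
 */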
static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		i915_request_set_error_once(request, -EIO);
		i915_request_mark_complete(request);
	}
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

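/*
 * Legacy ring submission: the request's commands are already written into
 * the shared ringbuffer, so "submitting" is just moving RING_TAIL past them.
 */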
static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

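/*
 * Context pinning on the legacy backend is mostly about the ppgtt: pre_pin
 * pins the (aliasing) ppgtt under the ww context, pin/unpin themselves are
 * no-ops, and post_unpin drops the ppgtt again.
 */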
static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

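/*
 * alloc_context_vma() creates the per-context register state object in
 * shmem, primes it from the engine's default ("golden") context image when
 * one exists, and wraps it in a GGTT vma.
 */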
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		shmem_read(engine->default_state, 0,
			   vaddr, engine->context_size);

		i915_gem_object_flush_map(obj);
		__i915_gem_object_release_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

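/*
 * All contexts on a legacy engine share the engine's single ring and
 * timeline; only the register state (ce->state) is allocated per context,
 * and only on engines that actually have a context image.
 */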
static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
		if (engine->default_state)
			__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

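/*
 * load_pd_dir() emits the ppgtt page-directory load into the request
 * stream: write PP_DIR_DCLV and PP_DIR_BASE, read PP_DIR_BASE back into
 * scratch (intended to stall until the load completes), then force a TLB
 * invalidation and flush.
 */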
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

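/*
 * For reference, the 12 dwords emitted by load_pd_dir() decode roughly as:
 *
 *   MI_LOAD_REGISTER_IMM(1); RING_PP_DIR_DCLV; valid (PP_DIR_DCLV_2G)
 *   MI_LOAD_REGISTER_IMM(1); RING_PP_DIR_BASE; pp_dir(vm)
 *   MI_STORE_REGISTER_MEM;   RING_PP_DIR_BASE; scratch offset
 *   MI_LOAD_REGISTER_IMM(1); RING_INSTPM;      INSTPM_TLB_INVALIDATE
 *
 * i.e. point the ring's page-directory registers at the new ppGTT, read
 * PP_DIR_BASE back into scratch as a poor man's "wait for the write to
 * land", then request a TLB invalidation through INSTPM.
 */
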
static inline int mi_set_context(struct i915_request *rq,
				 struct intel_context *ce,
				 u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (IS_GEN(i915, 5))
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (IS_GEN(i915, 5)) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

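/*
 * The heart of the switch above is just the two-dword packet
 *
 *   *cs++ = MI_SET_CONTEXT;
 *   *cs++ = i915_ggtt_offset(ce->state) | flags;
 *
 * bracketed by per-gen workarounds (arbitration off/on and PSMI sleep
 * messages on gen7, MI_SUSPEND_FLUSH on ilk). The flags roughly select
 * whether the outgoing context is saved and whether the incoming image is
 * restored (MI_RESTORE_INHIBIT skips the restore); see switch_context()
 * below for how they are chosen.
 */
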
static int remap_l3_slice(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

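/*
 * Each slice remap above is a single MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)
 * block writing the saved remap_info[] words into GEN7_L3LOG(slice, i), so
 * it costs GEN7_L3LOG_SIZE/4 * 2 + 2 dwords per slice, matching the
 * intel_ring_begin() reservation.
 */
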
static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

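/*
 * The ordering in switch_mm() is deliberate: flush with the old page
 * tables still live, rewrite the page-directory registers via
 * load_pd_dir(), then invalidate so nothing is fetched through stale TLB
 * entries. For a ppGTT switch the request therefore contains, in order:
 *
 *   emit_flush(EMIT_FLUSH)
 *   load_pd_dir(vm, PP_DIR_DCLV_2G)
 *   emit_flush(EMIT_INVALIDATE)
 */
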
static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

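/*
 * clear_residuals() replays the residuals-clearing batch from the kernel
 * context: switch to the kernel mm and context image (with RESTORE_INHIBIT,
 * as its contents are throwaway), run the wa_ctx batch built by
 * gen7_ctx_switch_bb_init(), then flush and invalidate so the following
 * switch_mm() for the user context starts from a clean slate.
 */
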
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

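/*
 * Per-request preamble order as emitted by switch_context():
 *   1. clear_residuals() - only when handing the engine to a new user
 *                          context and a wa_ctx batch exists
 *   2. switch_mm()       - ppGTT page-directory load and TLB maintenance
 *   3. mi_set_context()  - save/restore of the logical context image
 *   4. remap_l3()        - replay any pending L3 remapping for the context
 * The user payload follows after ring_request_alloc() returns.
 */
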
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

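/*
 * LEGACY_REQUEST_SIZE is added to request->reserved_space while the context
 * switch preamble is emitted so that enough ring space is claimed up front
 * and we are unlikely to have to wait partway through building the request;
 * the extra reservation is dropped again once the preamble is in place.
 */
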
static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

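/*
 * The gen6 VCS tail-write sequence above: disable the ring's IDLE
 * messaging so the GT treats it as busy and leaves rc6, poll
 * GEN6_BSD_SLEEP_INDICATOR until the ring has woken, write the new tail
 * via i9xx_submit_request(), then re-enable IDLE messages. All of it runs
 * under an explicit FORCEWAKE_ALL so the raw _fw register accesses are safe.
 */
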
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (IS_GEN(i915, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

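/*
 * Rough summary of the defaults chosen above, before the class-specific
 * setup_*() overrides below:
 *
 *   irq handlers:     gen2 / gen3 / gen5 / gen6+ variants by INTEL_GEN()
 *   fini breadcrumb:  gen3_emit_breadcrumb (gen5_emit_breadcrumb on ilk)
 *   emit_bb_start:    gen6+ -> gen6, gen4+ -> gen4, i830/i845 -> i830,
 *                     otherwise gen3
 */
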
static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (IS_GEN(i915, 6)) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (IS_GEN(i915, 5)) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (INTEL_GEN(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(i915, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(i915, 6))
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (IS_GEN(i915, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(i915, 6))
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size;
	int err;

	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (size <= 0)
		return size;

	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		goto err_private;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err_private:
	intel_context_put(vma->private);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

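/*
 * wa_ctx lifecycle: the batch is built once at engine setup for the
 * Haswell render ring (see intel_ring_submission_setup() below),
 * wa_ctx.vma->private tracks the last user context that ran so
 * switch_context() knows when the residuals batch must be replayed, and
 * ring_release() finally drops both the context reference and the vma.
 */
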
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	err = intel_timeline_pin(timeline, NULL);
	if (err)
		goto err_timeline;

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline_unpin;
	}

	err = intel_ring_pin(ring, NULL);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
		err = gen7_ctx_switch_bb_init(engine);
		if (err)
			goto err_ring_unpin;
	}

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_ring_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err_timeline_unpin:
	intel_timeline_unpin(timeline);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif