/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#include <drm/drm_mm.h>

#include "gt/intel_engine.h"
#include "gt/uc/intel_uc_fw.h"

#include "intel_device_info.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_params.h"
#include "i915_scheduler.h"

struct drm_i915_private;
struct intel_overlay_error_state;
struct intel_display_error_state;

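/*
 * A snapshot of the device taken at the point of failure: capture
 * timestamps, global and per-engine register state, and copies of the
 * buffers that were in flight.
 */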
struct i915_gpu_state {
	struct kref ref;
	ktime_t time;
	ktime_t boottime;
	ktime_t uptime;
	unsigned long capture;
	unsigned long epoch;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct intel_runtime_info runtime_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[6], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

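	/* Per-engine snapshot of the hardware and software state captured. */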
	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		unsigned long hangcheck_timestamp;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 hw_id;
			int active;
			int guilty;
			struct i915_sched_attr sched_attr;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int num_pages;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			unsigned long flags;
			long jiffies;
			pid_t pid;
			u32 context;
			u32 seqno;
			u32 start;
			u32 head;
			u32 tail;
			struct i915_sched_attr sched_attr;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct scatterlist *sgl, *fit;
};

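/*
 * Driver-wide error bookkeeping: the oldest captured state kept for
 * userspace to read, plus global and per-engine reset counters.
 */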
struct i915_gpu_error {
	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	/** Number of times the device has been reset (global) */
	atomic_t reset_count;

	/** Number of times an engine has been reset */
	atomic_t reset_engine_count[I915_NUM_ENGINES];
};

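/*
 * Cursor state used while formatting a capture into a text buffer;
 * see i915_error_printf() below.
 */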
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	struct scatterlist *sgl, *cur, *end;

	char *buf;
	size_t bytes;
	size_t size;
	loff_t iter;

	int err;
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      intel_engine_mask_t engine_mask,
			      const char *error_msg);

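/*
 * Minimal usage sketch (illustrative, not part of the original header):
 * capture the state of every engine after detecting a hang. ALL_ENGINES
 * is assumed to be the all-engines mask used elsewhere in the driver.
 *
 *	i915_capture_error_state(i915, ALL_ENGINES, "GPU hang");
 */
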
static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
				      char *buf, loff_t offset, size_t count);

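/*
 * Sketch of a chunked reader, assuming the usual read-style contract
 * (returns bytes copied, 0 at end of the capture, negative on error);
 * the loop itself is illustrative, not part of this header:
 *
 *	loff_t off = 0;
 *	ssize_t ret;
 *
 *	while ((ret = i915_gpu_state_copy_to_buffer(error, buf,
 *						    off, PAGE_SIZE)) > 0)
 *		off += ret;
 */
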
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}

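/*
 * Lifetime sketch (illustrative): a reader pins the capture across use
 * and drops its reference afterwards. That i915_first_error_state()
 * returns with a reference already held is an assumption here, not
 * something this header states.
 *
 *	struct i915_gpu_state *error;
 *
 *	error = i915_first_error_state(i915);
 *	if (!IS_ERR_OR_NULL(error)) {
 *		... use error, e.g. i915_gpu_state_copy_to_buffer() ...
 *		i915_gpu_state_put(error);
 *	}
 */
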
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    intel_engine_mask_t engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return ERR_PTR(-ENODEV);
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

static inline void i915_disable_error_state(struct drm_i915_private *i915,
					    int err)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */

#endif /* _I915_GPU_ERROR_H_ */