Merge tag 'drm-intel-fixes-2019-06-03' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
- Add missing Icelake W/A to disable GPU hang on cache ECC error
- GVT: a fix for the recently seen arbitrary DMA map fault, plus more enforcement fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190603132928.GA4866@jlahtine-desk.ger.corp.intel.com
commit 351212f69d
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
 		0, 12, NULL},
 
 	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
-		0, 20, NULL},
+		0, 12, NULL},
 };
 
 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
  */
 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
-	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
-			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
-				addr, size);
-		return false;
-	}
-	return true;
+	if (size == 0)
+		return vgpu_gmadr_is_valid(vgpu, addr);
+
+	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+		return true;
+	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+		return true;
+
+	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+		   addr, size);
+	return false;
 }
 
 /* translate a guest gmadr to host gmadr */
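A note on the validation change above: the old predicate only required each endpoint to be a valid guest address somewhere in the GGTT, so a range that starts in the mappable aperture and ends in the hidden (high) region could still pass; the rewrite insists both endpoints land in the same region. A standalone sketch of the two predicates, with made-up aperture/hidden bounds standing in for the vGPU's real partition:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-vGPU GGTT partition, for illustration only. */
    #define APERTURE_BASE	0x00000000ull
    #define APERTURE_LAST	0x0fffffffull
    #define HIDDEN_BASE		0x40000000ull
    #define HIDDEN_LAST		0x4fffffffull

    static bool in_aperture(uint64_t a) { return a >= APERTURE_BASE && a <= APERTURE_LAST; }
    static bool in_hidden(uint64_t a)   { return a >= HIDDEN_BASE && a <= HIDDEN_LAST; }
    static bool is_valid(uint64_t a)    { return in_aperture(a) || in_hidden(a); }

    /* Old check: each endpoint may fall in a *different* region. */
    static bool validate_old(uint64_t addr, uint32_t size)
    {
    	return is_valid(addr) && (!size || is_valid(addr + size - 1));
    }

    /* New check: both endpoints must sit in the same region. */
    static bool validate_new(uint64_t addr, uint32_t size)
    {
    	if (size == 0)
    		return is_valid(addr);
    	return (in_aperture(addr) && in_aperture(addr + size - 1)) ||
    	       (in_hidden(addr) && in_hidden(addr + size - 1));
    }

    /* e.g. addr = 0x0ffff000, size = 0x30002000: the range straddles the
     * gap between the two regions; validate_old() accepts it while
     * validate_new() rejects it. */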
@@ -2183,7 +2189,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
-	struct intel_gvt_gtt_entry e, m;
+	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
 	dma_addr_t dma_addr;
 	int ret;
 	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2257,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	if (!partial_update && (ops->test_present(&e))) {
 		gfn = ops->get_pfn(&e);
-		m = e;
+		m.val64 = e.val64;
+		m.type = e.type;
 
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn
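The "arbitrary DMA map fault" named in the tag message is what these two gtt.c hunks address: `e` and `m` live on the stack, and without initializers they start out holding whatever bytes the previous frame left behind, which the partial-PTE-update path could then treat as a present entry and hand on to DMA mapping. A minimal sketch of the fixed pattern, using a hypothetical `gtt_entry` type rather than the driver's:

    #include <stdint.h>

    struct gtt_entry {
    	uint64_t val64;
    	int type;
    };

    void example(void)
    {
    	/* Designated initializers give every field a known starting
    	 * value, so a half-constructed entry can never masquerade as
    	 * a present PTE pointing at a random page. */
    	struct gtt_entry e = { .val64 = 0, .type = 0 };
    	struct gtt_entry m = { .val64 = 0, .type = 0 };

    	/* Mirrors the second hunk: copy the known-good fields
    	 * explicitly instead of a whole-struct 'm = e;' assignment. */
    	m.val64 = e.val64;
    	m.type = e.type;
    	(void)m;
    }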
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
 	_MMIO(0x2690),
 	_MMIO(0x2694),
 	_MMIO(0x2698),
+	_MMIO(0x2754),
+	_MMIO(0x28a0),
 	_MMIO(0x4de0),
 	_MMIO(0x4de4),
 	_MMIO(0x4dfc),
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	bool enable_execlist;
 	int ret;
 
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+	if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
 	write_vreg(vgpu, offset, p_data, bytes);
 
+	if (data & _MASKED_BIT_ENABLE(1)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
+	if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+	    data & _MASKED_BIT_ENABLE(2)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
 	/* when PPGTT mode enabled, we will check if guest has called
 	 * pvinfo, if not, we will treat this guest as non-gvtg-aware
 	 * guest, and stop emulating its cfg space, mmio, gtt, etc.
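This handler (and csfe_chicken1_mmio_write below) leans on i915's masked-register convention: in a 32-bit write, the upper 16 bits select which of the lower 16 bits the write may change. That lets the handler strip a bit from what reaches the virtual register while still seeing, in `data`, that the guest attempted to enable it. A simplified sketch of the convention (local macro names, not the driver's):

    #include <stdint.h>

    /* Upper half = write-enable mask, lower half = new bit values. */
    #define MASKED_BIT_ENABLE(b)	((((uint32_t)(b)) << 16) | (uint32_t)(b))
    #define MASKED_BIT_DISABLE(b)	(((uint32_t)(b)) << 16)

    /* What the register emulation does on a masked write: only bits
     * whose mask bit is set actually change. */
    static uint32_t masked_write(uint32_t reg, uint32_t data)
    {
    	uint32_t mask = data >> 16;

    	return (reg & ~mask) | (data & mask);
    }

    void example(void)
    {
    	uint32_t data = MASKED_BIT_ENABLE(1);	/* guest asks for bit 1 */
    	uint32_t vreg = 0;

    	data &= ~MASKED_BIT_ENABLE(1);		/* handler strips it */
    	vreg = masked_write(vreg, data);	/* bit 1 never lands */
    	(void)vreg;
    }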
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+				    unsigned int offset, void *p_data,
+				    unsigned int bytes)
+{
+	u32 data = *(u32 *)p_data;
+
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+	return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
 	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
 		f, s, am, rm, d, r, w); \
@@ -3059,7 +3090,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
-	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		      NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -7620,6 +7620,9 @@ enum {
 #define  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION	(1 << 8)
 #define  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE		(1 << 0)
 
+#define GEN8_L3CNTLREG			_MMIO(0x7034)
+#define   GEN8_ERRDETBCTRL		(1 << 9)
+
 #define GEN11_COMMON_SLICE_CHICKEN3	_MMIO(0x7304)
 #define  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC	(1 << 11)
 
@@ -518,6 +518,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
+	/* WaDisableBankHangMode:icl */
+	wa_write(wal,
+		 GEN8_L3CNTLREG,
+		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+		 GEN8_ERRDETBCTRL);
+
 	/* Wa_1604370585:icl (pre-prod)
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
 	 */
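WaDisableBankHangMode is the missing Icelake workaround named in the tag: setting GEN8_ERRDETBCTRL in GEN8_L3CNTLREG stops the GPU from hanging when the L3 cache flags an ECC error. Worth noting is the read-modify-write shape of the entry: the value recorded in the workaround list is the register's current hardware value with only the new bit OR'd in. A reduced sketch of that shape, with toy MMIO accessors standing in for intel_uncore_read() and the wa-list machinery:

    #include <stdint.h>

    #define GEN8_ERRDETBCTRL	(1u << 9)

    /* Toy MMIO space standing in for the real register file. */
    static uint32_t regs[0x10000];

    static uint32_t mmio_read(uint32_t reg)            { return regs[reg >> 2]; }
    static void mmio_write(uint32_t reg, uint32_t val) { regs[reg >> 2] = val; }

    /* Read-modify-write: preserve whatever is already configured in the
     * register and add only the error-detect control bit. */
    static void wa_disable_bank_hang_mode(uint32_t l3cntlreg)
    {
    	mmio_write(l3cntlreg, mmio_read(l3cntlreg) | GEN8_ERRDETBCTRL);
    }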