Merge tag 'gvt-fixes-2019-07-30' of https://github.com/intel/gvt-linux into drm-intel-fixes
gvt-fixes-2019-07-30

- Guard against potential ggtt access error (Xiong)
- Fix includecheck (Zhenyu)
- Fix cache entry for guest page mapping found by 2M ppgtt guest (Xiaolin)
- Fix runtime pm warning (Xiaolin)
- Fix shadow mm settlement for Windows guest reset failure (Colin)

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730070020.GX8319@zhen-hp.sh.intel.com
commit cf4a459031
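The common thread in most of the hunks below is validating a guest-supplied graphics memory address (a ring buffer, a plane base, a GGTT offset, a wa_ctx pointer) against the vGPU's visible range before the host acts on it, rejecting the operation, or zero-filling the read, when it falls outside. The following standalone sketch illustrates that guard in isolation; the struct, bounds and helper names are invented for the example and are not the driver's actual API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical per-vGPU aperture description, just for illustration. */
struct vgpu_range {
        uint64_t base;   /* first guest graphics address owned by this vGPU */
        uint64_t size;   /* length of the range in bytes */
};

/* Reject zero-sized or wrapping/out-of-bounds [gma, gma + size) requests. */
static bool range_is_valid(const struct vgpu_range *r, uint64_t gma, uint64_t size)
{
        if (size == 0 || gma + size < gma)           /* overflow check */
                return false;
        return gma >= r->base && gma + size <= r->base + r->size;
}

/* Emulated read: out-of-range offsets return zeroes instead of stale data. */
static void emulated_read(const struct vgpu_range *r, uint64_t gma,
                          void *buf, size_t len)
{
        if (!range_is_valid(r, gma, len)) {
                memset(buf, 0, len);
                return;
        }
        /* a real implementation would look up the translation here */
        memset(buf, 0xab, len);                      /* stand-in payload */
}

int main(void)
{
        struct vgpu_range r = { .base = 0x100000, .size = 0x10000 };
        uint8_t v[8];

        emulated_read(&r, 0x100000, v, sizeof(v));   /* in range */
        printf("in-range byte: 0x%02x\n", v[0]);
        emulated_read(&r, 0x900000, v, sizeof(v));   /* out of range -> zeroes */
        printf("out-of-range byte: 0x%02x\n", v[0]);
        return 0;
}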
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
             gma_head == gma_tail)
                 return 0;
 
-        if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
-                ret = -EINVAL;
-                goto out;
-        }
-
         ret = ip_gma_set(&s, gma_head);
         if (ret)
                 goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
         s.workload = workload;
         s.is_ctx_wa = true;
 
-        if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
-                ret = -EINVAL;
-                goto out;
-        }
-
         ret = ip_gma_set(&s, gma_head);
         if (ret)
                 goto out;
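These two command-scanner hunks drop the ring-buffer range check from scan time. As the intel_vgpu_create_workload() hunks further down show, the same guest addresses are now validated earlier, when the workload is created, so an invalid ring buffer is rejected with -EINVAL before any scanning or shadowing begins.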
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
         plane->hw_format = fmt;
 
         plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
-        if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+        if (!vgpu_gmadr_is_valid(vgpu, plane->base))
                 return -EINVAL;
 
         plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
                         alpha_plane, alpha_force);
 
         plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
-        if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+        if (!vgpu_gmadr_is_valid(vgpu, plane->base))
                 return -EINVAL;
 
         plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
         plane->drm_format = drm_format;
 
         plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
-        if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+        if (!vgpu_gmadr_is_valid(vgpu, plane->base))
                 return -EINVAL;
 
         plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
         struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
         unsigned long index = off >> info->gtt_entry_size_shift;
+        unsigned long gma;
         struct intel_gvt_gtt_entry e;
 
         if (bytes != 4 && bytes != 8)
                 return -EINVAL;
 
+        gma = index << I915_GTT_PAGE_SHIFT;
+        if (!intel_gvt_ggtt_validate_range(vgpu,
+                                gma, 1 << I915_GTT_PAGE_SHIFT)) {
+                gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+                memset(p_data, 0, bytes);
+                return 0;
+        }
+
         ggtt_get_guest_entry(ggtt_mm, &e, index);
         memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
                         bytes);
@@ -1904,6 +1904,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
         entry = __gvt_cache_find_gfn(info->vgpu, gfn);
         if (!entry) {
+                ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+                if (ret)
+                        goto err_unlock;
+
+                ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+                if (ret)
+                        goto err_unmap;
+        } else if (entry->size != size) {
+                /* the same gfn with different size: unmap and re-map */
+                gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+                __gvt_cache_remove_entry(vgpu, entry);
+
                 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
                 if (ret)
                         goto err_unlock;
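This is the 2M-page cache fix from the pull message: the gfn-to-DMA cache is keyed only by guest frame number, so a lookup can hit an entry that was mapped with a different size (for instance a 4KB mapping cached earlier, now requested again as a 2MB huge page), and the stale mapping has to be torn down and re-created. A standalone sketch of the corrected lookup flow follows; the single-slot toy cache and all names in it are illustrative stand-ins for the driver's per-vGPU rbtree, not its real code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy single-slot cache standing in for the driver's per-vGPU rbtree. */
struct toy_entry {
        bool     used;
        uint64_t gfn;
        uint64_t dma_addr;
        uint64_t size;
        int      ref;
};

static struct toy_entry cache;
static uint64_t next_dma = 0x1000;

static int map_page(uint64_t gfn, uint64_t size, uint64_t *dma_addr)
{
        *dma_addr = next_dma;            /* pretend IOMMU mapping */
        next_dma += size;
        printf("map   gfn=%llu size=%llu -> dma=0x%llx\n",
               (unsigned long long)gfn, (unsigned long long)size,
               (unsigned long long)*dma_addr);
        return 0;
}

static void unmap_page(uint64_t gfn, uint64_t dma_addr, uint64_t size)
{
        printf("unmap gfn=%llu size=%llu dma=0x%llx\n",
               (unsigned long long)gfn, (unsigned long long)size,
               (unsigned long long)dma_addr);
}

/* Corrected flow: a cached entry only counts as a hit if the size matches. */
static int dma_map_guest_page(uint64_t gfn, uint64_t size, uint64_t *dma_addr)
{
        if (!cache.used || cache.gfn != gfn) {
                if (map_page(gfn, size, dma_addr))
                        return -1;
                cache = (struct toy_entry){ true, gfn, *dma_addr, size, 1 };
        } else if (cache.size != size) {
                /* same gfn with different size: unmap and re-map */
                unmap_page(gfn, cache.dma_addr, cache.size);
                if (map_page(gfn, size, dma_addr))
                        return -1;
                cache = (struct toy_entry){ true, gfn, *dma_addr, size, 1 };
        } else {
                cache.ref++;
                *dma_addr = cache.dma_addr;
        }
        return 0;
}

int main(void)
{
        uint64_t dma;

        dma_map_guest_page(42, 4096, &dma);          /* first mapping: 4K      */
        dma_map_guest_page(42, 2 << 20, &dma);       /* same gfn, now a 2M page */
        dma_map_guest_page(42, 2 << 20, &dma);       /* real hit: reuse entry   */
        return 0;
}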
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
         wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-                struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+                struct i915_gem_context *ctx)
 {
         struct intel_vgpu_mm *mm = workload->shadow_mm;
         struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
         int i = 0;
 
-        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-                return -EINVAL;
-
         if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                 px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
         } else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
                         px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                 }
         }
-
-        return 0;
 }
 
 static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 static int prepare_workload(struct intel_vgpu_workload *workload)
 {
         struct intel_vgpu *vgpu = workload->vgpu;
+        struct intel_vgpu_submission *s = &vgpu->submission;
+        int ring = workload->ring_id;
         int ret = 0;
 
         ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
                 return ret;
         }
 
+        if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+            !workload->shadow_mm->ppgtt_mm.shadowed) {
+                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+                return -EINVAL;
+        }
+
         update_shadow_pdps(workload);
 
+        set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
         ret = intel_vgpu_sync_oos_pages(workload->vgpu);
         if (ret) {
                 gvt_vgpu_err("fail to vgpu sync oos pages\n");
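Taken together, the scheduler hunks above read as the shadow-mm settlement fix: the "is the shadow PPGTT settled?" check moves out of set_context_ppgtt_from_shadow(), which becomes void, and into prepare_workload(), so a workload whose shadow mm is not ready fails early with -EINVAL instead of part-way through dispatch. A tiny standalone sketch of that check-then-apply split (types and names are invented for the example, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct toy_mm  { bool is_ppgtt; bool shadowed; int pdp; };
struct toy_ctx { int pdp; };

/* After the fix: the precondition is checked once, up front ... */
static int prepare(const struct toy_mm *mm)
{
        if (!mm->is_ppgtt || !mm->shadowed) {
                fprintf(stderr, "shadow ppgtt isn't ready\n");
                return -1;
        }
        return 0;
}

/* ... so the apply step no longer needs a failure path of its own. */
static void apply_shadow(struct toy_ctx *ctx, const struct toy_mm *mm)
{
        ctx->pdp = mm->pdp;     /* stand-in for copying the shadow PDPs */
}

int main(void)
{
        struct toy_mm mm = { .is_ppgtt = true, .shadowed = false, .pdp = 7 };
        struct toy_ctx ctx = { 0 };

        if (prepare(&mm) == 0)
                apply_shadow(&ctx, &mm);   /* skipped here: mm not settled yet */
        else
                printf("workload rejected before dispatch\n");
        return 0;
}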
|
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
         struct intel_vgpu *vgpu = workload->vgpu;
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-        struct intel_vgpu_submission *s = &vgpu->submission;
         struct i915_request *rq;
         int ring_id = workload->ring_id;
         int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
         mutex_lock(&vgpu->vgpu_lock);
         mutex_lock(&dev_priv->drm.struct_mutex);
 
-        ret = set_context_ppgtt_from_shadow(workload,
-                        s->shadow[ring_id]->gem_context);
-        if (ret < 0) {
-                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-                goto err_req;
-        }
-
         ret = intel_gvt_workload_req_alloc(workload);
         if (ret)
                 goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
         int ret;
         bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
         DEFINE_WAIT_FUNC(wait, woken_wake_function);
+        struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
 
         kfree(p);
 
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
                                 workload->ring_id, workload,
                                 workload->vgpu->id);
 
+                intel_runtime_pm_get(rpm);
+
                 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                 workload->ring_id, workload);
 
@@ -1042,6 +1042,7 @@ complete:
                 intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
                                 FORCEWAKE_ALL);
 
+                intel_runtime_pm_put_unchecked(rpm);
                 if (ret && (vgpu_is_vm_unhealthy(ret)))
                         enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
         }
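The three workload_thread() hunks above match the runtime-pm warning fix in the pull message: judging from these hunks, the thread now holds a dedicated struct intel_runtime_pm pointer, takes a reference with intel_runtime_pm_get(rpm) before it starts dispatching, and releases it with intel_runtime_pm_put_unchecked(rpm) only after forcewake has been put, so register access during dispatch should not land on a runtime-suspended device.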
|
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
         intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                         RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
+        if (!intel_gvt_ggtt_validate_range(vgpu, start,
+                                _RING_CTL_BUF_SIZE(ctl))) {
+                gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+                return ERR_PTR(-EINVAL);
+        }
+
         workload = alloc_workload(vgpu);
         if (IS_ERR(workload))
                 return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                 workload->wa_ctx.indirect_ctx.size =
                         (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                         CACHELINE_BYTES;
+
+                if (workload->wa_ctx.indirect_ctx.size != 0) {
+                        if (!intel_gvt_ggtt_validate_range(vgpu,
+                                workload->wa_ctx.indirect_ctx.guest_gma,
+                                workload->wa_ctx.indirect_ctx.size)) {
+                                kmem_cache_free(s->workloads, workload);
+                                gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+                                             workload->wa_ctx.indirect_ctx.guest_gma);
+                                return ERR_PTR(-EINVAL);
+                        }
+                }
+
                 workload->wa_ctx.per_ctx.guest_gma =
                         per_ctx & PER_CTX_ADDR_MASK;
                 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+                if (workload->wa_ctx.per_ctx.valid) {
+                        if (!intel_gvt_ggtt_validate_range(vgpu,
+                                workload->wa_ctx.per_ctx.guest_gma,
+                                CACHELINE_BYTES)) {
+                                kmem_cache_free(s->workloads, workload);
+                                gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+                                             workload->wa_ctx.per_ctx.guest_gma);
+                                return ERR_PTR(-EINVAL);
+                        }
+                }
         }
 
         gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
@@ -28,8 +28,6 @@
  *
  */
 
-#include "trace.h"
-
 #ifndef __CHECKER__
 #define CREATE_TRACE_POINTS
 #include "trace.h"
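This last hunk is the includecheck fix: trace.h was included twice in the file, once bare and once under CREATE_TRACE_POINTS, and the bare include goes away. For reference, the usual shape of a tracepoint instantiation file, sketched generically rather than quoting this file verbatim:

#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif

Exactly one translation unit defines CREATE_TRACE_POINTS before including the trace header so the tracepoint definitions are emitted once; every other user includes the header without the define.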