Merge tag 'gvt-next-fixes-2019-09-06' of https://github.com/intel/gvt-linux into drm-intel-next-fixes
gvt-next-fixes-2019-09-06

- Fix guest context head pointer update for hang (Xiaolin)
- Fix guest context ring state for reset (Weinan)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190906054255.GC3458@zhen-hp.sh.intel.com
commit 0ac072cced
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -568,6 +568,16 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
+static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 ring_base;
+
+	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+}
+
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -1016,6 +1026,13 @@ static int workload_thread(void *priv)
 		if (need_force_wake)
 			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
 					FORCEWAKE_ALL);
+		/*
+		 * Update the vReg of the vGPU which submitted this
+		 * workload. The vGPU may use these registers for checking
+		 * the context state. The value comes from GPU commands
+		 * in this workload.
+		 */
+		update_vreg_in_ctx(workload);
 
 		ret = dispatch_workload(workload);
 
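The two hunks above are the ring-state fix: just before dispatch, the ring-buffer start address carried by the workload is mirrored into the vGPU's virtual RING_START register, so a guest that checks context state during reset sees a value consistent with what it submitted. Below is a minimal userspace sketch of that data flow only; the fake_vgpu/fake_workload structs and mirror_ring_start() are hypothetical stand-ins for the real vgpu_vreg_t()/RING_START() accessors, not GVT-g code.

/*
 * Minimal userspace sketch (not GVT-g code) of the idea behind
 * update_vreg_in_ctx(): copy the ring-buffer start recorded in the
 * workload into the vGPU's virtual register file before dispatch.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 4

struct fake_vgpu {
	uint32_t vreg_ring_start[NUM_RINGS];	/* virtual RING_START per engine */
};

struct fake_workload {
	struct fake_vgpu *vgpu;
	int ring_id;
	uint32_t rb_start;			/* ring base parsed from the guest context */
};

/* Mirror the workload's ring start into the vGPU's virtual register. */
static void mirror_ring_start(struct fake_workload *w)
{
	w->vgpu->vreg_ring_start[w->ring_id] = w->rb_start;
}

int main(void)
{
	struct fake_vgpu vgpu = { {0} };
	struct fake_workload w = { .vgpu = &vgpu, .ring_id = 0, .rb_start = 0x1000 };

	mirror_ring_start(&w);	/* done just before dispatch in the real code */
	printf("vreg RING_START = 0x%x\n", (unsigned)vgpu.vreg_ring_start[0]);
	return 0;
}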
@@ -1438,9 +1455,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1460,7 +1474,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
@@ -1486,15 +1500,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each_entry_reverse(last_workload, q, list) {
+
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+					ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+					last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+			break;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
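The remaining hunks are the head-pointer fix: instead of only looking at the workload at the very tail of the queue (the removed get_last_workload() macro), the queue is now walked from newest to oldest and the ring tail of the most recent workload with the same context descriptor is reused as the head. A minimal userspace sketch of that search follows; the simplified list helpers and the head_from_same_context() function are illustrative stand-ins for the kernel's list_for_each_entry_reverse()/container_of(), not the actual GVT-g implementation.

/*
 * Minimal userspace sketch (not GVT-g code) of the head-pointer fix:
 * walk the workload queue from newest to oldest and reuse the ring
 * tail of the most recent workload belonging to the same context.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct workload {
	unsigned int context_id;
	unsigned long rb_tail;
	struct list_head list;
};

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Newest-to-oldest search for a queued workload of the same context. */
static int head_from_same_context(struct list_head *q, unsigned int ctx_id,
				  unsigned long *head)
{
	struct list_head *pos;

	for (pos = q->prev; pos != q; pos = pos->prev) {
		struct workload *w = container_of(pos, struct workload, list);

		if (w->context_id == ctx_id) {
			*head = w->rb_tail;	/* guest head may be stale; reuse our tail */
			return 1;
		}
	}
	return 0;	/* no queued workload of this context; keep the guest head */
}

int main(void)
{
	struct list_head q;
	struct workload a = { .context_id = 1, .rb_tail = 0x100 };
	struct workload b = { .context_id = 2, .rb_tail = 0x200 };
	unsigned long head = 0;

	list_init(&q);
	list_add_tail(&a.list, &q);
	list_add_tail(&b.list, &q);

	if (head_from_same_context(&q, 1, &head))
		printf("reuse head 0x%lx from queued context 1\n", head);
	return 0;
}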