Merge tag 'gvt-next-fixes-2018-03-20' of https://github.com/intel/gvt-linux into drm-intel-next-fixes
gvt-next-fixes-2018-03-20

- No need warning on untracked regs (Colin)
- Error handling fix for dma unmap (Changbin)
- invalidate shadow ppgtt for vGPU reset (Zhi)
- ensure to update shadow ppgtt after pinned (Zhi)
- force guest ctx ctrl update for sanity (Zhenyu/Xiong)
- one typo fix (Colin)

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320024108.f3fdwunm55zhk6gw@zhen-hp.sh.intel.com
commit 8c5cb3c1c5
@@ -2046,7 +2046,7 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
         }
 
         if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
-                gvt_err("vgpu ppgtt mm is not fully destoried\n");
+                gvt_err("vgpu ppgtt mm is not fully destroyed\n");
 
         if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
                 gvt_err("Why we still has spt not freed?\n");
@@ -2290,6 +2290,28 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
                 clean_spt_oos(gvt);
 }
 
+/**
+ * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
+ * @vgpu: a vGPU
+ *
+ * This function is called when invalidate all PPGTT instances of a vGPU.
+ *
+ */
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
+{
+        struct list_head *pos, *n;
+        struct intel_vgpu_mm *mm;
+
+        list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+                if (mm->type == INTEL_GVT_MM_PPGTT) {
+                        list_del_init(&mm->ppgtt_mm.lru_list);
+                        if (mm->ppgtt_mm.shadowed)
+                                invalidate_ppgtt_mm(mm);
+                }
+        }
+}
+
 /**
  * intel_vgpu_reset_ggtt - reset the GGTT entry
  * @vgpu: a vGPU
@@ -194,6 +194,7 @@ struct intel_vgpu_gtt {
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
@@ -1767,6 +1767,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
         MMIO_D(CURBASE(PIPE_B), D_ALL);
         MMIO_D(CURBASE(PIPE_C), D_ALL);
 
+        MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
+        MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
+        MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
+
         MMIO_D(_MMIO(0x700ac), D_ALL);
         MMIO_D(_MMIO(0x710ac), D_ALL);
         MMIO_D(_MMIO(0x720ac), D_ALL);
@@ -2228,6 +2232,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
         MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
+        MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
         MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
 
         MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
         MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
@@ -2559,6 +2564,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
         MMIO_D(WM_MISC, D_BDW);
         MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
 
+        MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
         MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
         MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
 
@@ -2787,6 +2793,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
         MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
         MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
         MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
         MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
 
         MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
@@ -2801,7 +2808,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
         MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
         MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 
+        MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
         MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+        MMIO_D(RC6_LOCATION, D_SKL_PLUS);
         MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
         MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
@@ -184,7 +184,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
         return NULL;
 }
 
-static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
                 dma_addr_t dma_addr)
 {
         struct gvt_dma *new, *itr;
@@ -192,7 +192,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 
         new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
         if (!new)
-                return;
+                return -ENOMEM;
 
         new->vgpu = vgpu;
         new->gfn = gfn;
@@ -229,6 +229,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
         rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
 
         vgpu->vdev.nr_cache_entries++;
+        return 0;
 }
 
 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
@@ -1586,11 +1587,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
         entry = __gvt_cache_find_gfn(info->vgpu, gfn);
         if (!entry) {
                 ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
-                if (ret) {
-                        mutex_unlock(&info->vgpu->vdev.cache_lock);
-                        return ret;
-                }
-                __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+                if (ret)
+                        goto err_unlock;
+
+                ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+                if (ret)
+                        goto err_unmap;
         } else {
                 kref_get(&entry->ref);
                 *dma_addr = entry->dma_addr;
@@ -1598,6 +1600,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
         mutex_unlock(&info->vgpu->vdev.cache_lock);
         return 0;
+
+err_unmap:
+        gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+err_unlock:
+        mutex_unlock(&info->vgpu->vdev.cache_lock);
+        return ret;
 }
 
 static void __gvt_dma_release(struct kref *ref)
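The kvmgt.c hunks above make __gvt_cache_add() report allocation failure and let kvmgt_dma_map_guest_page() unwind an already-established DMA mapping through goto labels, so a failed cache insertion no longer leaves the page mapped. Below is a minimal, self-contained C sketch of that goto-unwind error-handling idiom; the helper names (map_resource, cache_resource, unmap_resource) are invented for illustration and are not part of the GVT-g code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the DMA mapping and the cache entry. */
static int map_resource(void **res)   { *res = malloc(16); return *res ? 0 : -1; }
static int cache_resource(void *res)  { (void)res; return -1; /* pretend caching failed */ }
static void unmap_resource(void *res) { free(res); }

/* Acquire two resources; if the second step fails, undo the first before returning. */
static int acquire_both(void)
{
        void *res = NULL;
        int ret;

        ret = map_resource(&res);
        if (ret)
                goto err;               /* nothing acquired yet, plain exit */

        ret = cache_resource(res);
        if (ret)
                goto err_unmap;         /* undo the mapping made above */

        return 0;

err_unmap:
        unmap_resource(res);
err:
        return ret;
}

int main(void)
{
        printf("acquire_both() = %d\n", acquire_both());
        return 0;
}

The labels keep one exit path per acquired resource, which is why the kernel change introduces err_unmap and err_unlock rather than releasing inline at each failure site.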
@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer(
                 pdp_pair[i].val = pdp[7 - i];
 }
 
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+        struct intel_vgpu *vgpu = workload->vgpu;
+        int ring_id = workload->ring_id;
+        struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+        struct drm_i915_gem_object *ctx_obj =
+                shadow_ctx->engine[ring_id].state->obj;
+        struct execlist_ring_context *shadow_ring_context;
+        struct page *page;
+
+        if (WARN_ON(!workload->shadow_mm))
+                return;
+
+        if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+                return;
+
+        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+        shadow_ring_context = kmap(page);
+        set_context_pdp_root_pointer(shadow_ring_context,
+                (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+        kunmap(page);
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
         struct intel_vgpu *vgpu = workload->vgpu;
@@ -101,8 +124,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #define COPY_REG(name) \
         intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+#define COPY_REG_MASKED(name) {\
+                intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+                        + RING_CTX_OFF(name.val),\
+                        &shadow_ring_context->name.val, 4);\
+                shadow_ring_context->name.val |= 0xffff << 16;\
+        }
 
-        COPY_REG(ctx_ctrl);
+        COPY_REG_MASKED(ctx_ctrl);
         COPY_REG(ctx_timestamp);
 
         if (ring_id == RCS) {
@@ -111,9 +140,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                 COPY_REG(rcs_indirect_ctx_offset);
         }
 #undef COPY_REG
-
-        set_context_pdp_root_pointer(shadow_ring_context,
-                (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+#undef COPY_REG_MASKED
 
         intel_gvt_hypervisor_read_gpa(vgpu,
                 workload->ring_context_gpa +
@@ -509,6 +536,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
                 return ret;
         }
 
+        update_shadow_pdps(workload);
+
         ret = intel_vgpu_sync_oos_pages(workload->vgpu);
         if (ret) {
                 gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -522,6 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
         /* full GPU reset or device model level reset */
         if (engine_mask == ALL_ENGINES || dmlr) {
                 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+                intel_vgpu_invalidate_ppgtt(vgpu);
                 /*fence will not be reset during virtual reset */
                 if (dmlr) {
                         intel_vgpu_reset_gtt(vgpu);