drm/i915/gvt: update vreg on inhibit context lri command
Commit cd7e 61b9"init mmio by lri command in vgpu inhibit context" initializes registers saved/restored in context with its vreg value through lri command in ring buffer. It relies on vreg got updated on every guest access. There is a case found that Linux guest uses lri command in inhibit-ctx to update the register. This patch adds vreg update on this case. v2: move mmio_attribute functions to gvt.h (Zhenyu) v3: use mask_mmio_write in vreg update v4: refine codes and add more comments (Zhenyu) Fixes: cd7e61b9("drm/i915/gvt: init mmio by lri command in vgpu inhibit context") Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com> Signed-off-by: Weinan Li <weinan.z.li@intel.com> Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
This commit is contained in:
parent a4cae23cc0
commit 6cef21a196
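Before the diff, a quick refresher on what the command parser is matching: MI_LOAD_REGISTER_IMM (LRI) is one header dword followed by (register offset, value) pairs, which is why the new parser code below finds the immediate value at cmd_val(s, index + 1), one dword after the register offset. A minimal standalone sketch of that layout, assuming the documented LRI header and offset-field encoding (all values and names here are illustrative, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Walk the (offset, value) pairs of an MI_LOAD_REGISTER_IMM packet. */
    static void walk_lri(const uint32_t *cmd, unsigned int ndwords)
    {
    	/* dword 0 is the LRI header; pairs start at dword 1 */
    	for (unsigned int i = 1; i + 1 < ndwords; i += 2)
    		printf("reg 0x%05x <- 0x%08x\n",
    		       (unsigned)(cmd[i] & 0x007ffffc),	/* dword-aligned offset field */
    		       (unsigned)cmd[i + 1]);		/* immediate value */
    }

    int main(void)
    {
    	/* hypothetical guest LRI writing two registers */
    	const uint32_t lri[] = { 0x11000003,		/* LRI header, 2 pairs */
    				 0x0000229c, 0xffff0000,
    				 0x00002090, 0x00000001 };
    	walk_lri(lri, 5);
    	return 0;
    }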
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
 	struct intel_vgpu *vgpu = s->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
+	u32 ctx_sr_ctl;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
+	/* TODO
+	 * Right now only scan LRI command on KBL and in inhibit context.
+	 * It's good enough to support initializing mmio by lri command in
+	 * vgpu inhibit context on KBL.
+	 */
+	if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+			intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+			!strncmp(cmd, "lri", 3)) {
+		intel_gvt_hypervisor_read_gpa(s->vgpu,
+			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+		/* check inhibit context */
+		if (ctx_sr_ctl & 1) {
+			u32 data = cmd_val(s, index + 1);
+
+			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+				intel_vgpu_mask_mmio_write(vgpu,
+							offset, &data, 4);
+			else
+				vgpu_vreg(vgpu, offset) = data;
+		}
+	}
+
 	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
 	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
 	return 0;
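A note on the constants in the hunk above: the dword fetched from ring_context_gpa + 12 is the saved context-control value in the guest's ring context image, and bit 0 of that register is the engine context restore inhibit bit (CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT in i915), so the ctx_sr_ctl & 1 test is what detects an inhibit context. A hedged sketch of just that check (standalone; the macro mirrors the i915 definition, the helper name is made up):

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit 0 of the saved context-control dword; mirrors i915's
     * CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT. */
    #define CTX_RESTORE_INHIBIT	(1u << 0)

    /* ctx_sr_ctl is the dword read from ring_context_gpa + 12 */
    static bool is_inhibit_context(uint32_t ctx_sr_ctl)
    {
    	return ctx_sr_ctl & CTX_RESTORE_INHIBIT;
    }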
drivers/gpu/drm/i915/gvt/gvt.h

@@ -268,6 +268,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX	(1 << 7)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
@@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if a MMIO has an in-ctx mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has an in-context mask, false if it hasn't.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
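The two helpers just added follow the existing attribute-table scheme: one byte of F_* flag bits per dword-aligned MMIO register, indexed by offset >> 2 (byte offset to register index). A standalone sketch of that data structure under assumed names and an assumed table size (illustrative, not the kernel allocation):

    #include <stdbool.h>
    #include <stdint.h>

    #define MMIO_SIZE	(2 * 1024 * 1024)	/* hypothetical MMIO span */
    #define F_IN_CTX	(1 << 7)

    /* one flag byte per dword register, as in gvt->mmio.mmio_attribute */
    static uint8_t mmio_attribute[MMIO_SIZE >> 2];

    static void set_in_ctx(unsigned int offset)
    {
    	mmio_attribute[offset >> 2] |= F_IN_CTX;
    }

    static bool is_in_ctx(unsigned int offset)
    {
    	return mmio_attribute[offset >> 2] & F_IN_CTX;
    }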
drivers/gpu/drm/i915/gvt/handlers.c

@@ -3045,6 +3045,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 mask, old_vreg;
+
+	old_vreg = vgpu_vreg(vgpu, offset);
+	write_vreg(vgpu, offset, p_data, bytes);
+	mask = vgpu_vreg(vgpu, offset) >> 16;
+	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+				(vgpu_vreg(vgpu, offset) & mask);
+
+	return 0;
+}
+
 /**
  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
  * force-nopriv register
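intel_vgpu_mask_mmio_write above implements the usual masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits take effect, and unselected bits keep their old vreg contents. A small standalone demo of the same arithmetic (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mask_write(uint32_t old, uint32_t val)
    {
    	uint32_t mask = val >> 16;		/* which low bits to touch */

    	return (old & ~mask) | (val & mask);	/* others keep old value */
    }

    int main(void)
    {
    	uint32_t reg = 0x0000f0f0;

    	reg = mask_write(reg, 0x00080008);	/* set bit 3   -> 0000f0f8 */
    	printf("%08x\n", (unsigned)reg);
    	reg = mask_write(reg, 0x00080000);	/* clear bit 3 -> 0000f0f0 */
    	printf("%08x\n", (unsigned)reg);
    	return 0;
    }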
drivers/gpu/drm/i915/gvt/mmio.h

@@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 			void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+				void *p_data, unsigned int bytes);
 #endif
drivers/gpu/drm/i915/gvt/mmio_context.c

@@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
-		if (mmio->in_context)
+		if (mmio->in_context) {
 			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+		}
 	}
 }