drm/i915/gvt: devirtualize ->{read,write}_gpa
Just call the VFIO functions directly instead of through the method table.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-14-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
commit e3d7640eee
parent 3c340d0586
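In outline (condensed from the gvt.h, mpt.h, and kvmgt.c hunks below, not a complete listing): the old wrapper dispatched through the intel_gvt_mpt method table, whose only remaining implementation was the VFIO-backed kvmgt code, while the new helper calls vfio_dma_rw() directly:

	/* Before (mpt.h): one indirect call per guest-memory access. */
	static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
			unsigned long gpa, void *buf, unsigned long len)
	{
		return intel_gvt_host.mpt->read_gpa(vgpu, gpa, buf, len);
	}

	/* After (gvt.h): the body of kvmgt_rw_gpa() moves into an inline
	 * helper and the ->read_gpa/->write_gpa method slots go away. */
	static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu,
			unsigned long gpa, void *buf, unsigned long len)
	{
		if (!vgpu->attached)
			return -ESRCH;	/* no VFIO group to read through */
		return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
	}

intel_gvt_write_gpa() is identical except that it passes write = true to vfio_dma_rw().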
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		if (GRAPHICS_VER(s->engine->i915) == 9 &&
 		    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
 		    !strncmp(cmd, "lri", 3)) {
-			intel_gvt_hypervisor_read_gpa(s->vgpu,
+			intel_gvt_read_gpa(s->vgpu,
 				s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
 			/* check inhibit context */
 			if (ctx_sr_ctl & 1) {
@@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
 			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
 
-		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+		intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);
 
 		len += copy_len;
 		gma += copy_len;
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 					 vgpu->hws_pga[execlist->engine->id]);
 	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
-		intel_gvt_hypervisor_write_gpa(vgpu,
+		intel_gvt_write_gpa(vgpu,
 			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
 			status, 8);
-		intel_gvt_hypervisor_write_gpa(vgpu,
+		intel_gvt_write_gpa(vgpu,
 			hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
 			&write_pointer, 4);
 	}
 
 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -314,7 +314,7 @@ static inline int gtt_get_entry64(void *pt,
 		return -EINVAL;
 
 	if (hypervisor_access) {
-		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+		ret = intel_gvt_read_gpa(vgpu, gpa +
 				(index << info->gtt_entry_size_shift),
 				&e->val64, 8);
 		if (WARN_ON(ret))
@@ -339,7 +339,7 @@ static inline int gtt_set_entry64(void *pt,
 		return -EINVAL;
 
 	if (hypervisor_access) {
-		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+		ret = intel_gvt_write_gpa(vgpu, gpa +
 				(index << info->gtt_entry_size_shift),
 				&e->val64, 8);
 		if (WARN_ON(ret))
@@ -1497,7 +1497,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
 	struct intel_gvt *gvt = spt->vgpu->gvt;
 	int ret;
 
-	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+	ret = intel_gvt_read_gpa(spt->vgpu,
 			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
 			oos_page->mem, I915_GTT_PAGE_SIZE);
 	if (ret)
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -35,6 +35,7 @@
 
 #include <uapi/linux/pci_regs.h>
 #include <linux/kvm_host.h>
+#include <linux/vfio.h>
 
 #include "i915_drv.h"
 #include "intel_gvt.h"
@@ -720,6 +721,42 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
 }
 
+/**
+ * intel_gvt_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
+		void *buf, unsigned long len)
+{
+	if (!vgpu->attached)
+		return -ESRCH;
+	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+}
+
+/**
+ * intel_gvt_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
+		unsigned long gpa, void *buf, unsigned long len)
+{
+	if (!vgpu->attached)
+		return -ESRCH;
+	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+}
+
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -49,10 +49,6 @@ struct intel_gvt_mpt {
 	int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data);
 	int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn);
 	int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn);
-	int (*read_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf,
-			unsigned long len);
-	int (*write_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf,
-			unsigned long len);
 	unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn);
 
 	int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn,
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -39,7 +39,6 @@
 #include <linux/spinlock.h>
 #include <linux/eventfd.h>
 #include <linux/uuid.h>
-#include <linux/vfio.h>
 #include <linux/mdev.h>
 #include <linux/debugfs.h>
 
@@ -2024,26 +2023,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 	mutex_unlock(&vgpu->cache_lock);
 }
 
-static int kvmgt_rw_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
-			void *buf, unsigned long len, bool write)
-{
-	if (!vgpu->attached)
-		return -ESRCH;
-	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write);
-}
-
-static int kvmgt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
-			void *buf, unsigned long len)
-{
-	return kvmgt_rw_gpa(vgpu, gpa, buf, len, false);
-}
-
-static int kvmgt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
-			void *buf, unsigned long len)
-{
-	return kvmgt_rw_gpa(vgpu, gpa, buf, len, true);
-}
-
 static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
 {
 	struct kvm *kvm = vgpu->kvm;
@@ -2067,8 +2046,6 @@ static const struct intel_gvt_mpt kvmgt_mpt = {
 	.inject_msi = kvmgt_inject_msi,
 	.enable_page_track = kvmgt_page_track_add,
 	.disable_page_track = kvmgt_page_track_remove,
-	.read_gpa = kvmgt_read_gpa,
-	.write_gpa = kvmgt_write_gpa,
 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
 	.dma_map_guest_page = kvmgt_dma_map_guest_page,
 	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 	}
 
 	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
-		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+		ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
 		goto out;
 	}
 
@@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 	}
 
 	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
-		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+		ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
 		goto out;
 	}
 
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -152,38 +152,6 @@ static inline int intel_gvt_hypervisor_disable_page_track(
 	return intel_gvt_host.mpt->disable_page_track(vgpu, gfn);
 }
 
-/**
- * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
- * @vgpu: a vGPU
- * @gpa: guest physical address
- * @buf: host data buffer
- * @len: data length
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
-		unsigned long gpa, void *buf, unsigned long len)
-{
-	return intel_gvt_host.mpt->read_gpa(vgpu, gpa, buf, len);
-}
-
-/**
- * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
- * @vgpu: a vGPU
- * @gpa: guest physical address
- * @buf: host data buffer
- * @len: data length
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
-		unsigned long gpa, void *buf, unsigned long len)
-{
-	return intel_gvt_host.mpt->write_gpa(vgpu, gpa, buf, len);
-}
-
 /**
  * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
  * @vgpu: a vGPU
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -421,14 +421,14 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		  INTEL_GVT_OPREGION_SCIC;
 	parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
 		  INTEL_GVT_OPREGION_PARM;
-	ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
+	ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
 	if (ret) {
 		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
 			ret, scic_pa, sizeof(scic));
 		return ret;
 	}
 
-	ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
+	ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
 	if (ret) {
 		gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
 			ret, scic_pa, sizeof(scic));
@@ -465,16 +465,14 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		parm = 0;
 
 out:
-	ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic,
-			sizeof(scic));
+	ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic));
 	if (ret) {
 		gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
 			ret, scic_pa, sizeof(scic));
 		return ret;
 	}
 
-	ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, &parm,
-			sizeof(parm));
+	ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm));
 	if (ret) {
 		gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
 			ret, scic_pa, sizeof(scic));
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
-	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+	intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
 #define COPY_REG_MASKED(name) {\
-	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+	intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val),\
 		&shadow_ring_context->name.val, 4);\
 	shadow_ring_context->name.val |= 0xffff << 16;\
@@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		COPY_REG(rcs_indirect_ctx);
 		COPY_REG(rcs_indirect_ctx_offset);
 	} else if (workload->engine->id == BCS0)
-		intel_gvt_hypervisor_read_gpa(vgpu,
+		intel_gvt_read_gpa(vgpu,
 				workload->ring_context_gpa +
 				BCS_TILE_REGISTER_VAL_OFFSET,
 				(void *)shadow_ring_context +
@@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	/* don't copy Ring Context (the first 0x50 dwords),
 	 * only copy the Engine Context part from guest
 	 */
-	intel_gvt_hypervisor_read_gpa(vgpu,
+	intel_gvt_read_gpa(vgpu,
 			workload->ring_context_gpa +
 			RING_CTX_SIZE,
 			(void *)shadow_ring_context +
@@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			continue;
 
 read:
-		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+		intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
 		gpa_base = context_gpa;
 		gpa_size = I915_GTT_PAGE_SIZE;
 		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
@@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu,
 	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
 
 	for (i = 0; i < 8; i++)
-		intel_gvt_hypervisor_write_gpa(vgpu,
-				gpa + i * 8, &pdp[7 - i], 4);
+		intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
 }
 
 static __maybe_unused bool
@@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			continue;
 
 write:
-		intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+		intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
 		gpa_base = context_gpa;
 		gpa_size = I915_GTT_PAGE_SIZE;
 		src = context_base + (i << I915_GTT_PAGE_SHIFT);
 	}
 
-	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+	intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
 	shadow_ring_context = (void *) ctx->lrc_reg_state;
@@ -1028,7 +1027,7 @@ write:
 	}
 
 #define COPY_REG(name) \
-	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+	intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
 		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
 
 	COPY_REG(ctx_ctrl);
@@ -1036,7 +1035,7 @@ write:
 
 #undef COPY_REG
 
-	intel_gvt_hypervisor_write_gpa(vgpu,
+	intel_gvt_write_gpa(vgpu,
 			workload->ring_context_gpa +
 			sizeof(*shadow_ring_context),
 			(void *)shadow_ring_context +
@@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
 	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
 
 	for (i = 0; i < 8; i++)
-		intel_gvt_hypervisor_read_gpa(vgpu,
+		intel_gvt_read_gpa(vgpu,
 				gpa + i * 8, &pdp[7 - i], 4);
 }
 
@@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
 		return ERR_PTR(-EINVAL);
 	}
 
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+	intel_gvt_read_gpa(vgpu, ring_context_gpa +
 		RING_CTX_OFF(ring_header.val), &head, 4);
 
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+	intel_gvt_read_gpa(vgpu, ring_context_gpa +
 		RING_CTX_OFF(ring_tail.val), &tail, 4);
 
 	guest_head = head;
@@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
 	gvt_dbg_el("ring %s begin a new workload\n", engine->name);
 
 	/* record some ring buffer register values for scan and shadow */
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+	intel_gvt_read_gpa(vgpu, ring_context_gpa +
 		RING_CTX_OFF(rb_start.val), &start, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+	intel_gvt_read_gpa(vgpu, ring_context_gpa +
 		RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+	intel_gvt_read_gpa(vgpu, ring_context_gpa +
 		RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
 	if (!intel_gvt_ggtt_validate_range(vgpu, start,
@@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
 	workload->rb_ctl = ctl;
 
 	if (engine->id == RCS0) {
-		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+		intel_gvt_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
-		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+		intel_gvt_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
 
 		workload->wa_ctx.indirect_ctx.guest_gma =