Merge tag 'gvt-next-2017-02-24' of https://github.com/01org/gvt-linux into drm-intel-fixes
gvt-next-2017-02-24

- Min's vGPU failsafe to guard against non-secured guests
- Guest warning fixes and host error message cleanup
- Fixed vGPU type refinement for usability
- environ string fix from Takashi Iwai
- One kernel oops fix from Chuanxiao
- Other misc fixes

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
commit 77e14ae6d7
@@ -237,6 +237,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
    int ret;

    if (vgpu->failsafe)
        return 0;

    if (WARN_ON(bytes > 4))
        return -EINVAL;
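
The vgpu->failsafe check above is the guard this series adds at each emulation entry point; the same early return appears below for the MMIO read and write paths, and the flag is set once a guest is detected as non-GVT-aware. A self-contained toy sketch of the idea, using hypothetical names rather than the kernel's structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct intel_vgpu; only the fields the sketch needs. */
    struct demo_vgpu {
        bool pv_notified;   /* guest has touched the PV info page */
        bool failsafe;      /* once set, emulation is short-circuited */
        unsigned int vreg;
    };

    static void enter_failsafe(struct demo_vgpu *vgpu)
    {
        fprintf(stderr, "guest looks non-GVT-aware, entering failsafe mode\n");
        vgpu->failsafe = true;
    }

    /* Emulation entry point: the early return mirrors the check added above. */
    static int emulate_reg_write(struct demo_vgpu *vgpu, unsigned int offset,
                                 unsigned int val)
    {
        if (vgpu->failsafe)
            return 0;               /* pretend success, change nothing */

        if (offset > 0xff && !vgpu->pv_notified) {
            enter_failsafe(vgpu);   /* out-of-range access from a naive guest */
            return 0;
        }

        vgpu->vreg = val;
        return 0;
    }

    int main(void)
    {
        struct demo_vgpu vgpu = { .pv_notified = false, .failsafe = false, .vreg = 0 };

        emulate_reg_write(&vgpu, 0x10, 0x1234);   /* applied */
        emulate_reg_write(&vgpu, 0x500, 0xdead);  /* trips failsafe */
        emulate_reg_write(&vgpu, 0x10, 0x5678);   /* ignored from now on */
        printf("vreg = 0x%x, failsafe = %d\n", vgpu.vreg, vgpu.failsafe);
        return 0;
    }
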
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
    return 0;
}

-static unsigned char virtual_dp_monitor_edid[] = {
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
    {
        /* EDID with 1024x768 as its resolution */
        /*Header*/
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        /* Vendor & Product Identification */
        0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
        /* Version & Revision */
        0x01, 0x04,
        /* Basic Display Parameters & Features */
        0xa5, 0x34, 0x20, 0x78, 0x23,
        /* Color Characteristics */
        0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
        /* Established Timings: maximum resolution is 1024x768 */
        0x21, 0x08, 0x00,
        /* Standard Timings. All invalid */
        0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
        0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
        /* 18 Byte Data Blocks 1: invalid */
        0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
        0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
        /* 18 Byte Data Blocks 2: invalid */
        0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
        0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
        /* 18 Byte Data Blocks 3: invalid */
        0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
        0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
        /* 18 Byte Data Blocks 4: invalid */
        0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
        0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
        /* Extension Block Count */
        0x00,
        /* Checksum */
        0xef,
    },
    {
        /* EDID with 1920x1200 as its resolution */
        /*Header*/
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        /* Vendor & Product Identification */
        0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
        /* Version & Revision */
        0x01, 0x04,
        /* Basic Display Parameters & Features */
        0xa5, 0x34, 0x20, 0x78, 0x23,
        /* Color Characteristics */
        0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
        /* Established Timings: maximum resolution is 1024x768 */
        0x21, 0x08, 0x00,
        /*
         * Standard Timings.
         * below new resolutions can be supported:
         * 1920x1080, 1280x720, 1280x960, 1280x1024,
         * 1440x900, 1600x1200, 1680x1050
         */
        0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
        0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
        /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
        0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
        0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
        /* 18 Byte Data Blocks 2: invalid */
        0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
        0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
        /* 18 Byte Data Blocks 3: invalid */
        0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
        0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
        /* 18 Byte Data Blocks 4: invalid */
        0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
        0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
        /* Extension Block Count */
        0x00,
        /* Checksum */
        0x45,
    },
};

#define DPCD_HEADER_SIZE 0xb
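
Both tables above are standard 128-byte EDID base blocks; the final byte of each (0xef and 0x45 here) is a checksum chosen so that the whole block sums to zero modulo 256. A small standalone helper to verify or recompute that byte, added here for illustration only (it is not part of the patch):

    #include <stdint.h>
    #include <stddef.h>

    #define EDID_BLOCK_SIZE 128

    /* Value the last byte must have so the 128-byte block sums to 0 mod 256. */
    static uint8_t edid_compute_checksum(const uint8_t *block)
    {
        unsigned int sum = 0;
        size_t i;

        for (i = 0; i < EDID_BLOCK_SIZE - 1; i++)
            sum += block[i];
        return (uint8_t)(0x100 - (sum & 0xff));
    }

    /* Returns 1 if the block's existing checksum byte is consistent, 0 otherwise. */
    static int edid_block_valid(const uint8_t *block)
    {
        return edid_compute_checksum(block) == block[EDID_BLOCK_SIZE - 1];
    }
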
@@ -175,10 +211,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
}

static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
-       int type)
+       int type, unsigned int resolution)
{
    struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);

    if (WARN_ON(resolution >= GVT_EDID_NUM))
        return -EINVAL;

    port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
    if (!port->edid)
        return -ENOMEM;

@@ -189,7 +228,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        return -ENOMEM;
    }

-   memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+   memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
            EDID_SIZE);
    port->edid->data_valid = true;
@@ -322,16 +361,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 * Zero on success, negative error code if failed.
 *
 */
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

    intel_vgpu_init_i2c_edid(vgpu);

    if (IS_SKYLAKE(dev_priv))
-       return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+       return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+               resolution);
    else
-       return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+       return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+               resolution);
}

/**
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
    int type;
};

enum intel_vgpu_edid {
    GVT_EDID_1024_768,
    GVT_EDID_1920_1200,
    GVT_EDID_NUM,
};

static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
{
    switch (id) {
    case GVT_EDID_1024_768:
        return "1024x768";
    case GVT_EDID_1920_1200:
        return "1920x1200";
    default:
        return "";
    }
}

void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);

-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
    int ret;

    size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
-   firmware = vmalloc(size);
+   firmware = vzalloc(size);
    if (!firmware)
        return -ENOMEM;
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
    gma = g_gtt_index << GTT_PAGE_SHIFT;

    /* the VM may configure the whole GM space when ballooning is used */
-   if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
-           "vgpu%d: found oob ggtt write, offset %x\n",
-           vgpu->id, off)) {
+   if (!vgpu_gmadr_is_valid(vgpu, gma))
        return 0;
-   }

    ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
    return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
{
    struct list_head *pos, *n;
    struct intel_vgpu_mm *mm;

    list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
        mm = container_of(pos, struct intel_vgpu_mm, list);
        if (mm->type == type) {
            vgpu->gvt->gtt.mm_free_page_table(mm);
            list_del(&mm->list);
            list_del(&mm->lru_list);
            kfree(mm);
        }
    }
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization
 * @vgpu: a vGPU

@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
-   struct list_head *pos, *n;
-   struct intel_vgpu_mm *mm;

    ppgtt_free_all_shadow_page(vgpu);
    release_scratch_page_tree(vgpu);

-   list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
-       mm = container_of(pos, struct intel_vgpu_mm, list);
-       vgpu->gvt->gtt.mm_free_page_table(mm);
-       list_del(&mm->list);
-       list_del(&mm->lru_list);
-       kfree(mm);
-   }
+   intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+   intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}

static void clean_spt_oos(struct intel_gvt *gvt)

@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
    int i;

    ppgtt_free_all_shadow_page(vgpu);

    /* Shadow pages are only created when there is no page
     * table tracking data, so remove page tracking data after
     * removing the shadow pages.
     */
    intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);

    if (!dmlr)
        return;
@@ -143,6 +143,8 @@ struct intel_vgpu {
    int id;
    unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
    bool active;
    bool pv_notified;
    bool failsafe;
    bool resetting;
    void *sched_data;

@@ -203,18 +205,18 @@ struct intel_gvt_firmware {
};

struct intel_gvt_opregion {
-   void __iomem *opregion_va;
+   void *opregion_va;
    u32 opregion_pa;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
    char name[16];
-   unsigned int max_instance;
    unsigned int avail_instance;
    unsigned int low_gm_size;
    unsigned int high_gm_size;
    unsigned int fence;
    enum intel_vgpu_edid resolution;
};

struct intel_gvt {

@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
    __u64 low_gm_sz; /* in MB */
    __u64 high_gm_sz; /* in MB */
    __u64 fence_sz;
    __u64 resolution;
    __s32 primary;
    __u64 vgpu_id;
};

@@ -449,6 +452,11 @@ struct intel_gvt_ops {
};

enum {
    GVT_FAILSAFE_UNSUPPORTED_GUEST,
    GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
};

#include "mpt.h"

#endif
@@ -150,15 +150,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
#define fence_num_to_offset(num) \
    (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))


static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
    switch (reason) {
    case GVT_FAILSAFE_UNSUPPORTED_GUEST:
        pr_err("Detected your guest driver doesn't support GVT-g.\n");
        break;
    case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
        pr_err("Graphics resource is not enough for the guest\n");
    default:
        break;
    }
    pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
    vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
        unsigned int fence_num, void *p_data, unsigned int bytes)
{
    if (fence_num >= vgpu_fence_sz(vgpu)) {
-       gvt_err("vgpu%d: found oob fence register access\n",
-               vgpu->id);
-       gvt_err("vgpu%d: total fence num %d access fence num %d\n",
-               vgpu->id, vgpu_fence_sz(vgpu), fence_num);

        /* When guest access oob fence regs without access
         * pv_info first, we treat guest not supporting GVT,
         * and we will let vgpu enter failsafe mode.
         */
        if (!vgpu->pv_notified)
            enter_failsafe_mode(vgpu,
                    GVT_FAILSAFE_UNSUPPORTED_GUEST);

        if (!vgpu->mmio.disable_warn_untrack) {
            gvt_err("vgpu%d: found oob fence register access\n",
                    vgpu->id);
            gvt_err("vgpu%d: total fence %d, access fence %d\n",
                    vgpu->id, vgpu_fence_sz(vgpu),
                    fence_num);
        }
        memset(p_data, 0, bytes);
        return -EINVAL;
    }
    return 0;
}
@@ -369,6 +398,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    return 0;
}

/* ascendingly sorted */
static i915_reg_t force_nonpriv_white_list[] = {
    GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
    GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
    GEN8_CS_CHICKEN1,//_MMIO(0x2580)
    _MMIO(0x2690),
    _MMIO(0x2694),
    _MMIO(0x2698),
    _MMIO(0x4de0),
    _MMIO(0x4de4),
    _MMIO(0x4dfc),
    GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
    _MMIO(0x7014),
    HDC_CHICKEN0,//_MMIO(0x7300)
    GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
    _MMIO(0x7700),
    _MMIO(0x7704),
    _MMIO(0x7708),
    _MMIO(0x770c),
    _MMIO(0xb110),
    GEN8_L3SQCREG4,//_MMIO(0xb118)
    _MMIO(0xe100),
    _MMIO(0xe18c),
    _MMIO(0xe48c),
    _MMIO(0xe5f4),
};

/* a simple bsearch */
static inline bool in_whitelist(unsigned int reg)
{
    int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
    i915_reg_t *array = force_nonpriv_white_list;

    while (left < right) {
        int mid = (left + right)/2;

        if (reg > array[mid].reg)
            left = mid + 1;
        else if (reg < array[mid].reg)
            right = mid;
        else
            return true;
    }
    return false;
}

static int force_nonpriv_write(struct intel_vgpu *vgpu,
    unsigned int offset, void *p_data, unsigned int bytes)
{
    u32 reg_nonpriv = *(u32 *)p_data;
    int ret = -EINVAL;

    if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
        gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
            vgpu->id, offset, bytes);
        return ret;
    }

    if (in_whitelist(reg_nonpriv)) {
        ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
            bytes);
    } else {
        gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
            vgpu->id, reg_nonpriv);
    }
    return ret;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        void *p_data, unsigned int bytes)
{
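
The whitelist above has to stay sorted in ascending offset order for the open-coded binary search in in_whitelist() to work. For illustration only, the same lookup expressed in userspace with the C library bsearch() over a plain array of offsets (the offsets are copied from the comments above; this is not the kernel code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Must stay sorted ascending, just like force_nonpriv_white_list. */
    static const uint32_t whitelist[] = {
        0x20ec, 0x2248, 0x2580, 0x2690, 0x2694, 0x2698,
        0x4de0, 0x4de4, 0x4dfc, 0x7010, 0x7014, 0x7300,
    };

    static int cmp_u32(const void *a, const void *b)
    {
        uint32_t ka = *(const uint32_t *)a;
        uint32_t kb = *(const uint32_t *)b;

        return (ka > kb) - (ka < kb);
    }

    static int in_whitelist_demo(uint32_t reg)
    {
        return bsearch(&reg, whitelist, sizeof(whitelist) / sizeof(whitelist[0]),
                       sizeof(whitelist[0]), cmp_u32) != NULL;
    }

    int main(void)
    {
        printf("%d %d\n", in_whitelist_demo(0x2580), in_whitelist_demo(0x2584)); /* 1 0 */
        return 0;
    }
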
@@ -1001,6 +1098,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
    if (invalid_read)
        gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
                offset, bytes, *(u32 *)p_data);
    vgpu->pv_notified = true;
    return 0;
}

@@ -1039,7 +1137,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
    char vmid_str[20];
    char display_ready_str[20];

-   snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+   snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
    env[0] = display_ready_str;

    snprintf(vmid_str, 20, "VMID=%d", vgpu->id);

@@ -1078,6 +1176,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    case _vgtif_reg(execlist_context_descriptor_lo):
    case _vgtif_reg(execlist_context_descriptor_hi):
        break;
    case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
        enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
        break;
    default:
        gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
                offset, bytes, data);

@@ -1214,6 +1315,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
        else
            *data0 = 0x61514b3d;
        break;
    case SKL_PCODE_CDCLK_CONTROL:
        *data0 = SKL_CDCLK_READY_FOR_CHANGE;
        break;
    case 0x5:
        *data0 |= 0x1;
        break;

@@ -1221,8 +1325,13 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,

    gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
            vgpu->id, value, *data0);

-   value &= ~(1 << 31);
    /**
     * PCODE_READY clear means ready for pcode read/write,
     * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
     * always emulate as pcode read/write success and ready for access
     * anytime, since we don't touch real physical registers here.
     */
+   value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
    return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
@@ -1318,6 +1427,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    bool enable_execlist;

    write_vreg(vgpu, offset, p_data, bytes);

    /* when PPGTT mode enabled, we will check if guest has called
     * pvinfo, if not, we will treat this guest as non-gvtg-aware
     * guest, and stop emulating its cfg space, mmio, gtt, etc.
     */
    if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
            (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
            && !vgpu->pv_notified) {
        enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
        return 0;
    }
    if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
            || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
        enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
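
The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() tests above rely on the i915 masked-register convention, where the upper 16 bits of a written value select which of the lower 16 bits actually change. A standalone sketch of how such a write is applied; the macro definitions and the bit position used here are assumptions for illustration, not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define RUN_LIST_BIT           0x8000u   /* illustrative bit position */
    #define MASKED_BIT_ENABLE(a)   ((((uint32_t)(a)) << 16) | (uint32_t)(a))
    #define MASKED_BIT_DISABLE(a)  (((uint32_t)(a)) << 16)

    /* Apply a masked write: the high 16 bits say which low bits to update. */
    static uint32_t apply_masked_write(uint32_t reg, uint32_t wr)
    {
        uint32_t mask = wr >> 16;

        return (reg & ~mask) | (wr & mask);
    }

    int main(void)
    {
        uint32_t mode = 0;

        mode = apply_masked_write(mode, MASKED_BIT_ENABLE(RUN_LIST_BIT));
        printf("enabled:  0x%08x\n", mode);   /* bit set */
        mode = apply_masked_write(mode, MASKED_BIT_DISABLE(RUN_LIST_BIT));
        printf("disabled: 0x%08x\n", mode);   /* bit cleared */
        return 0;
    }
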
@@ -1475,6 +1595,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
    MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
    MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
    MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
    MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK, NULL, NULL);
    MMIO_DFH(0x2124, D_ALL, F_MODE_MASK, NULL, NULL);

    MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
    MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);

@@ -1493,7 +1615,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
    MMIO_D(0x243c, D_ALL);
    MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
    MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
    MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
    MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

    /* display */
    MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);

@@ -2346,9 +2468,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)

    MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

-   MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
-   MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
-   MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+   MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+   MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+   MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);

    MMIO_D(WM_MISC, D_BDW);
    MMIO_D(BDW_EDP_PSR_BASE, D_BDW);

@@ -2362,7 +2484,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
    MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
    MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);

-   MMIO_D(0xfdc, D_BDW);
+   MMIO_D(0xfdc, D_BDW_PLUS);
    MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
    MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
    MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);

@@ -2374,10 +2496,12 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
    MMIO_D(0xb10c, D_BDW);
    MMIO_D(0xb110, D_BDW);

-   MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-   MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-   MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-   MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+   MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+           NULL, force_nonpriv_write);

    MMIO_D(0x22040, D_BDW_PLUS);
    MMIO_D(0x44484, D_BDW_PLUS);
    MMIO_D(0x4448c, D_BDW_PLUS);

    MMIO_D(0x83a4, D_BDW);
    MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);

@@ -2624,6 +2748,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
    MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);

    MMIO_D(0x44500, D_SKL);
    MMIO_D(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS);
    return 0;
}
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
        return 0;

    return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-           "fence: %d\n",
-           BYTES_TO_MB(type->low_gm_size),
-           BYTES_TO_MB(type->high_gm_size),
-           type->fence);
+           "fence: %d\nresolution: %s\n",
+           BYTES_TO_MB(type->low_gm_size),
+           BYTES_TO_MB(type->high_gm_size),
+           type->fence, vgpu_edid_str(type->resolution));
}

static MDEV_TYPE_ATTR_RO(available_instances);
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
    (reg >= gvt->device_info.gtt_start_offset \
        && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
        void *p_data, unsigned int bytes, bool read)
{
    struct intel_gvt *gvt = NULL;
    void *pt = NULL;
    unsigned int offset = 0;

    if (!vgpu || !p_data)
        return;

    gvt = vgpu->gvt;
    mutex_lock(&gvt->lock);
    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
    if (reg_is_mmio(gvt, offset)) {
        if (read)
            intel_vgpu_default_mmio_read(vgpu, offset, p_data,
                    bytes);
        else
            intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                    bytes);
    } else if (reg_is_gtt(gvt, offset) &&
            vgpu->gtt.ggtt_mm->virtual_page_table) {
        offset -= gvt->device_info.gtt_start_offset;
        pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
        if (read)
            memcpy(p_data, pt, bytes);
        else
            memcpy(pt, p_data, bytes);

    } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        /* Since we enter the failsafe mode early during guest boot,
         * guest may not have chance to set up its ppgtt table, so
         * there should not be any wp pages for guest. Keep the wp
         * related code here in case we need to handle it in furture.
         */
        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            /* remove write protection to prevent furture traps */
            intel_vgpu_clean_guest_page(vgpu, gp);
            if (read)
                intel_gvt_hypervisor_read_gpa(vgpu, pa,
                        p_data, bytes);
            else
                intel_gvt_hypervisor_write_gpa(vgpu, pa,
                        p_data, bytes);
        }
    }
    mutex_unlock(&gvt->lock);
}

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU

@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
    unsigned int offset = 0;
    int ret = -EINVAL;

    if (vgpu->failsafe) {
        failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
        return 0;
    }
    mutex_lock(&gvt->lock);

    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {

@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
    u32 old_vreg = 0, old_sreg = 0;
    int ret = -EINVAL;

    if (vgpu->failsafe) {
        failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
        return 0;
    }

    mutex_lock(&gvt->lock);

    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {

@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,

    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack)
-       gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+       gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
            vgpu->id, offset, bytes, *(u32 *)p_data);

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {

@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)

    /* set the bit 0:2(Core C-State ) to C0 */
    vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

    vgpu->mmio.disable_warn_untrack = false;
}

/**
@@ -27,7 +27,6 @@

static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
-   void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
    u8 *buf;
    int i;

@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
    if (!vgpu_opregion(vgpu)->va)
        return -ENOMEM;

-   memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
-           INTEL_GVT_OPREGION_SIZE);
+   memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
+           INTEL_GVT_OPREGION_SIZE);

    for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
        vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
    {RCS, _MMIO(0x24d4), 0, false},
    {RCS, _MMIO(0x24d8), 0, false},
    {RCS, _MMIO(0x24dc), 0, false},
    {RCS, _MMIO(0x24e0), 0, false},
    {RCS, _MMIO(0x24e4), 0, false},
    {RCS, _MMIO(0x24e8), 0, false},
    {RCS, _MMIO(0x24ec), 0, false},
    {RCS, _MMIO(0x24f0), 0, false},
    {RCS, _MMIO(0x24f4), 0, false},
    {RCS, _MMIO(0x24f8), 0, false},
    {RCS, _MMIO(0x24fc), 0, false},
    {RCS, _MMIO(0x7004), 0xffff, true},
    {RCS, _MMIO(0x7008), 0xffff, true},
    {RCS, _MMIO(0x7000), 0xffff, true},

@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
    {RCS, _MMIO(0x24d4), 0, false},
    {RCS, _MMIO(0x24d8), 0, false},
    {RCS, _MMIO(0x24dc), 0, false},
    {RCS, _MMIO(0x24e0), 0, false},
    {RCS, _MMIO(0x24e4), 0, false},
    {RCS, _MMIO(0x24e8), 0, false},
    {RCS, _MMIO(0x24ec), 0, false},
    {RCS, _MMIO(0x24f0), 0, false},
    {RCS, _MMIO(0x24f4), 0, false},
    {RCS, _MMIO(0x24f8), 0, false},
    {RCS, _MMIO(0x24fc), 0, false},
    {RCS, _MMIO(0x7004), 0xffff, true},
    {RCS, _MMIO(0x7008), 0xffff, true},
    {RCS, _MMIO(0x7000), 0xffff, true},
@@ -139,6 +139,9 @@ static int shadow_context_status_change(struct notifier_block *nb,
    struct intel_vgpu_workload *workload =
        scheduler->current_workload[req->engine->id];

    if (unlikely(!workload))
        return NOTIFY_OK;

    switch (action) {
    case INTEL_CONTEXT_SCHEDULE_IN:
        intel_gvt_load_render_mmio(workload->vgpu,
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
    WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

static struct {
    unsigned int low_mm;
    unsigned int high_mm;
    unsigned int fence;
    enum intel_vgpu_edid edid;
    char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
    { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
    { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
    { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
    { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
};

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
    unsigned int min_low;

    /* vGPU type name is defined as GVTg_Vx_y which contains
-    * physical GPU generation type and 'y' means maximum vGPU
-    * instances user can create on one physical GPU for this
-    * type.
+    * physical GPU generation type (e.g V4 as BDW server, V5 as
+    * SKL server).
     *
     * Depend on physical SKU resource, might see vGPU types like
     * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create

@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
     */
    low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
    high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-   num_types = 4;
+   num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);

    gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
            GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)

    min_low = MB_TO_BYTES(32);
    for (i = 0; i < num_types; ++i) {
-       if (low_avail / min_low == 0)
+       if (low_avail / vgpu_types[i].low_mm == 0)
            break;
-       gvt->types[i].low_gm_size = min_low;
-       gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
-       gvt->types[i].fence = 4;
-       gvt->types[i].max_instance = min(low_avail / min_low,
-               high_avail / gvt->types[i].high_gm_size);
-       gvt->types[i].avail_instance = gvt->types[i].max_instance;

+       gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
+       gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
+       gvt->types[i].fence = vgpu_types[i].fence;
+       gvt->types[i].resolution = vgpu_types[i].edid;
+       gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
+               high_avail / vgpu_types[i].high_mm);

        if (IS_GEN8(gvt->dev_priv))
-           sprintf(gvt->types[i].name, "GVTg_V4_%u",
-                   gvt->types[i].max_instance);
+           sprintf(gvt->types[i].name, "GVTg_V4_%s",
+                   vgpu_types[i].name);
        else if (IS_GEN9(gvt->dev_priv))
-           sprintf(gvt->types[i].name, "GVTg_V5_%u",
-                   gvt->types[i].max_instance);
+           sprintf(gvt->types[i].name, "GVTg_V5_%s",
+                   vgpu_types[i].name);

-       min_low <<= 1;
-       gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
-               i, gvt->types[i].name, gvt->types[i].max_instance,
+       gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+               i, gvt->types[i].name,
                gvt->types[i].avail_instance,
                gvt->types[i].low_gm_size,
-               gvt->types[i].high_gm_size, gvt->types[i].fence);
+               gvt->types[i].high_gm_size, gvt->types[i].fence,
+               vgpu_edid_str(gvt->types[i].resolution));
    }

    gvt->num_types = i;
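
With the fixed table above, a type's avail_instance is simply min(low_avail / low_mm, high_avail / high_mm); the update path in the next hunk additionally caps it by the free fence registers. A worked example under assumed aperture sizes (512 MB of low GM and 4 GB of high GM left for vGPUs are illustrative values, not taken from the patch):

    #include <stdio.h>

    #define MB(x) ((unsigned long long)(x) << 20)

    int main(void)
    {
        /* Assumed amounts left after the host's share; illustrative only. */
        unsigned long long low_avail  = MB(512);
        unsigned long long high_avail = MB(4096);

        struct { unsigned long long low_mm, high_mm; const char *name; } types[] = {
            { MB(64),  MB(512),  "GVTg_Vx_8" },
            { MB(128), MB(512),  "GVTg_Vx_4" },
            { MB(256), MB(1024), "GVTg_Vx_2" },
            { MB(512), MB(2048), "GVTg_Vx_1" },
        };

        for (int i = 0; i < 4; i++) {
            unsigned long long by_low  = low_avail / types[i].low_mm;
            unsigned long long by_high = high_avail / types[i].high_mm;
            unsigned long long avail   = by_low < by_high ? by_low : by_high;

            printf("%s: min(%llu, %llu) = %llu instances\n",
                   types[i].name, by_low, by_high, avail);
        }
        return 0;
    }

Under these assumptions the counts come out as 8, 4, 2 and 1, matching the "8"/"4"/"2"/"1" suffixes in the fixed type table.
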
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
    int i;
    unsigned int low_gm_avail, high_gm_avail, fence_avail;
-   unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+   unsigned int low_gm_min, high_gm_min, fence_min;

    /* Need to depend on maxium hw resource size but keep on
     * static config for now.

@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
        low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
        high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
        fence_min = fence_avail / gvt->types[i].fence;
-       total_min = min(min(low_gm_min, high_gm_min), fence_min);
-       gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
-               total_min);
+       gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
+               fence_min);

-       gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
-               i, gvt->types[i].name, gvt->types[i].max_instance,
+       gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
+               i, gvt->types[i].name,
                gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
                gvt->types[i].high_gm_size, gvt->types[i].fence);
    }
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
    if (ret)
        goto out_detach_hypervisor_vgpu;

-   ret = intel_vgpu_init_display(vgpu);
+   ret = intel_vgpu_init_display(vgpu, param->resolution);
    if (ret)
        goto out_clean_gtt;

@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
    param.low_gm_sz = type->low_gm_size;
    param.high_gm_sz = type->high_gm_size;
    param.fence_sz = type->fence;
    param.resolution = type->resolution;

    /* XXX current param based on MB */
    param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);

@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
        populate_pvinfo_page(vgpu);
        intel_vgpu_reset_display(vgpu);

-       if (dmlr)
+       if (dmlr) {
            intel_vgpu_reset_cfg_space(vgpu);
            /* only reset the failsafe mode when dmlr reset */
            vgpu->failsafe = false;
            vgpu->pv_notified = false;
        }
    }

    vgpu->resetting = false;