Merge tag 'drm-intel-fixes-2017-03-22' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
drm/i915 fixes for v4.11-rc4 * tag 'drm-intel-fixes-2017-03-22' of git://anongit.freedesktop.org/git/drm-intel: drm/i915: make context status notifier head be per engine drm/i915: Avoid rcu_barrier() from reclaim paths (shrinker) drm/i915/gvt: Fix gvt scheduler interval time drm/i915/gvt: GVT pin/unpin shadow context drm/i915/gvt: scan shadow indirect context image when valid drm/i915/kvmgt: fix suspicious rcu dereference usage drm/i915/gvt: add enable_execlists check before enable gvt drm/i915/gvt: Remove bogus retry around i915_wait_request drm/i915/gvt: correct the ggtt valid bit check in pipe control command drm/i915/gvt: replace the gvt_err with gvt_vgpu_err drm/i915/gvt: handle force-nonpriv registers, cmd parser part drm/i915: Do .init_clock_gating() earlier to avoid it clobbering watermarks drm/i915/glk: Remove MODULE_FIRMWARE() tag from Geminilake's DMC drm/i915: Reject HDMI 12bpc if the sink doesn't indicate support drm/i915: Always call i915_gem_reset_finish() following i915_gem_reset_prepare() drm/i915: Stop using RP_DOWN_EI on Baytrail drm/i915: Drop support for I915_EXEC_CONSTANTS_* execbuf parameters. drm/i915: Only enable hotplug interrupts if the display interrupts are enabled drm/i915: Disable engine->irq_tasklet around resets drm/i915: Split GEM resetting into 3 phases
This commit is contained in:
commit
d08997cb41
|
@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
|
||||||
const char *item;
|
const char *item;
|
||||||
|
|
||||||
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
|
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
|
||||||
gvt_err("Invalid vGPU creation params\n");
|
gvt_vgpu_err("Invalid vGPU creation params\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
no_enough_resource:
|
no_enough_resource:
|
||||||
gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
|
gvt_vgpu_err("fail to allocate resource %s\n", item);
|
||||||
gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
|
gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
|
||||||
vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
|
BYTES_TO_MB(request), BYTES_TO_MB(avail),
|
||||||
BYTES_TO_MB(max), BYTES_TO_MB(taken));
|
BYTES_TO_MB(max), BYTES_TO_MB(taken));
|
||||||
return -ENOSPC;
|
return -ENOSPC;
|
||||||
}
|
}
|
||||||
|
|
|
@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool is_force_nonpriv_mmio(unsigned int offset)
|
||||||
|
{
|
||||||
|
return (offset >= 0x24d0 && offset < 0x2500);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int force_nonpriv_reg_handler(struct parser_exec_state *s,
|
||||||
|
unsigned int offset, unsigned int index)
|
||||||
|
{
|
||||||
|
struct intel_gvt *gvt = s->vgpu->gvt;
|
||||||
|
unsigned int data = cmd_val(s, index + 1);
|
||||||
|
|
||||||
|
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
|
||||||
|
gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
|
||||||
|
offset, data);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int cmd_reg_handler(struct parser_exec_state *s,
|
static int cmd_reg_handler(struct parser_exec_state *s,
|
||||||
unsigned int offset, unsigned int index, char *cmd)
|
unsigned int offset, unsigned int index, char *cmd)
|
||||||
{
|
{
|
||||||
|
@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
|
||||||
struct intel_gvt *gvt = vgpu->gvt;
|
struct intel_gvt *gvt = vgpu->gvt;
|
||||||
|
|
||||||
if (offset + 4 > gvt->device_info.mmio_size) {
|
if (offset + 4 > gvt->device_info.mmio_size) {
|
||||||
gvt_err("%s access to (%x) outside of MMIO range\n",
|
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
|
||||||
cmd, offset);
|
cmd, offset);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
|
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
|
||||||
gvt_err("vgpu%d: %s access to non-render register (%x)\n",
|
gvt_vgpu_err("%s access to non-render register (%x)\n",
|
||||||
s->vgpu->id, cmd, offset);
|
cmd, offset);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is_shadowed_mmio(offset)) {
|
if (is_shadowed_mmio(offset)) {
|
||||||
gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
|
gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
|
||||||
s->vgpu->id, offset);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (is_force_nonpriv_mmio(offset) &&
|
||||||
|
force_nonpriv_reg_handler(s, offset, index))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (offset == i915_mmio_reg_offset(DERRMR) ||
|
if (offset == i915_mmio_reg_offset(DERRMR) ||
|
||||||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
|
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
|
||||||
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
|
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
|
||||||
|
@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
|
||||||
ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
|
ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
|
||||||
else if (post_sync == 1) {
|
else if (post_sync == 1) {
|
||||||
/* check ggtt*/
|
/* check ggtt*/
|
||||||
if ((cmd_val(s, 2) & (1 << 2))) {
|
if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
|
||||||
gma = cmd_val(s, 2) & GENMASK(31, 3);
|
gma = cmd_val(s, 2) & GENMASK(31, 3);
|
||||||
if (gmadr_bytes == 8)
|
if (gmadr_bytes == 8)
|
||||||
gma |= (cmd_gma_hi(s, 3)) << 32;
|
gma |= (cmd_gma_hi(s, 3)) << 32;
|
||||||
|
@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
|
||||||
struct mi_display_flip_command_info *info)
|
struct mi_display_flip_command_info *info)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
|
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
u32 dword0 = cmd_val(s, 0);
|
u32 dword0 = cmd_val(s, 0);
|
||||||
u32 dword1 = cmd_val(s, 1);
|
u32 dword1 = cmd_val(s, 1);
|
||||||
u32 dword2 = cmd_val(s, 2);
|
u32 dword2 = cmd_val(s, 2);
|
||||||
|
@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
gvt_err("unknown plane code %d\n", plane);
|
gvt_vgpu_err("unknown plane code %d\n", plane);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
|
||||||
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
|
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
|
||||||
{
|
{
|
||||||
struct mi_display_flip_command_info info;
|
struct mi_display_flip_command_info info;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
int ret;
|
int ret;
|
||||||
int i;
|
int i;
|
||||||
int len = cmd_length(s);
|
int len = cmd_length(s);
|
||||||
|
|
||||||
ret = decode_mi_display_flip(s, &info);
|
ret = decode_mi_display_flip(s, &info);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to decode MI display flip command\n");
|
gvt_vgpu_err("fail to decode MI display flip command\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = check_mi_display_flip(s, &info);
|
ret = check_mi_display_flip(s, &info);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("invalid MI display flip command\n");
|
gvt_vgpu_err("invalid MI display flip command\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = update_plane_mmio_from_mi_display_flip(s, &info);
|
ret = update_plane_mmio_from_mi_display_flip(s, &info);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to update plane mmio\n");
|
gvt_vgpu_err("fail to update plane mmio\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (op_size > max_surface_size) {
|
if (op_size > max_surface_size) {
|
||||||
gvt_err("command address audit fail name %s\n", s->info->name);
|
gvt_vgpu_err("command address audit fail name %s\n",
|
||||||
|
s->info->name);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
err:
|
err:
|
||||||
gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
|
gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
|
||||||
s->info->name, guest_gma, op_size);
|
s->info->name, guest_gma, op_size);
|
||||||
|
|
||||||
pr_err("cmd dump: ");
|
pr_err("cmd dump: ");
|
||||||
|
@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
|
||||||
|
|
||||||
static inline int unexpected_cmd(struct parser_exec_state *s)
|
static inline int unexpected_cmd(struct parser_exec_state *s)
|
||||||
{
|
{
|
||||||
gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
s->vgpu->id, s->info->name);
|
|
||||||
|
gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
|
||||||
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
|
||||||
while (gma != end_gma) {
|
while (gma != end_gma) {
|
||||||
gpa = intel_vgpu_gma_to_gpa(mm, gma);
|
gpa = intel_vgpu_gma_to_gpa(mm, gma);
|
||||||
if (gpa == INTEL_GVT_INVALID_ADDR) {
|
if (gpa == INTEL_GVT_INVALID_ADDR) {
|
||||||
gvt_err("invalid gma address: %lx\n", gma);
|
gvt_vgpu_err("invalid gma address: %lx\n", gma);
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
|
||||||
uint32_t bb_size = 0;
|
uint32_t bb_size = 0;
|
||||||
uint32_t cmd_len = 0;
|
uint32_t cmd_len = 0;
|
||||||
bool met_bb_end = false;
|
bool met_bb_end = false;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
u32 cmd;
|
u32 cmd;
|
||||||
|
|
||||||
/* get the start gm address of the batch buffer */
|
/* get the start gm address of the batch buffer */
|
||||||
|
@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
|
||||||
|
|
||||||
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
|
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
|
||||||
if (info == NULL) {
|
if (info == NULL) {
|
||||||
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
|
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
|
||||||
cmd, get_opcode(cmd, s->ring_id));
|
cmd, get_opcode(cmd, s->ring_id));
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
|
||||||
gma, gma + 4, &cmd);
|
gma, gma + 4, &cmd);
|
||||||
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
|
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
|
||||||
if (info == NULL) {
|
if (info == NULL) {
|
||||||
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
|
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
|
||||||
cmd, get_opcode(cmd, s->ring_id));
|
cmd, get_opcode(cmd, s->ring_id));
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
|
||||||
static int perform_bb_shadow(struct parser_exec_state *s)
|
static int perform_bb_shadow(struct parser_exec_state *s)
|
||||||
{
|
{
|
||||||
struct intel_shadow_bb_entry *entry_obj;
|
struct intel_shadow_bb_entry *entry_obj;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
unsigned long gma = 0;
|
unsigned long gma = 0;
|
||||||
uint32_t bb_size;
|
uint32_t bb_size;
|
||||||
void *dst = NULL;
|
void *dst = NULL;
|
||||||
|
@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||||
|
|
||||||
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
|
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("failed to set shadow batch to CPU\n");
|
gvt_vgpu_err("failed to set shadow batch to CPU\n");
|
||||||
goto unmap_src;
|
goto unmap_src;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||||
gma, gma + bb_size,
|
gma, gma + bb_size,
|
||||||
dst);
|
dst);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to copy guest ring buffer\n");
|
gvt_vgpu_err("fail to copy guest ring buffer\n");
|
||||||
goto unmap_src;
|
goto unmap_src;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
|
||||||
{
|
{
|
||||||
bool second_level;
|
bool second_level;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
|
|
||||||
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
|
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
|
||||||
gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
|
gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
|
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
|
||||||
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
|
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
|
||||||
gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
|
gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
|
||||||
if (batch_buffer_needs_scan(s)) {
|
if (batch_buffer_needs_scan(s)) {
|
||||||
ret = perform_bb_shadow(s);
|
ret = perform_bb_shadow(s);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
gvt_err("invalid shadow batch buffer\n");
|
gvt_vgpu_err("invalid shadow batch buffer\n");
|
||||||
} else {
|
} else {
|
||||||
/* emulate a batch buffer end to do return right */
|
/* emulate a batch buffer end to do return right */
|
||||||
ret = cmd_handler_mi_batch_buffer_end(s);
|
ret = cmd_handler_mi_batch_buffer_end(s);
|
||||||
|
@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
cycles_t t0, t1, t2;
|
cycles_t t0, t1, t2;
|
||||||
struct parser_exec_state s_before_advance_custom;
|
struct parser_exec_state s_before_advance_custom;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
|
|
||||||
t0 = get_cycles();
|
t0 = get_cycles();
|
||||||
|
|
||||||
|
@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
|
||||||
|
|
||||||
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
|
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
|
||||||
if (info == NULL) {
|
if (info == NULL) {
|
||||||
gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
|
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
|
||||||
cmd, get_opcode(cmd, s->ring_id));
|
cmd, get_opcode(cmd, s->ring_id));
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
|
||||||
if (info->handler) {
|
if (info->handler) {
|
||||||
ret = info->handler(s);
|
ret = info->handler(s);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
gvt_err("%s handler error\n", info->name);
|
gvt_vgpu_err("%s handler error\n", info->name);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
|
||||||
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
|
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
|
||||||
ret = cmd_advance_default(s);
|
ret = cmd_advance_default(s);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("%s IP advance error\n", info->name);
|
gvt_vgpu_err("%s IP advance error\n", info->name);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
|
||||||
|
|
||||||
unsigned long gma_head, gma_tail, gma_bottom;
|
unsigned long gma_head, gma_tail, gma_bottom;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
struct intel_vgpu *vgpu = s->vgpu;
|
||||||
|
|
||||||
gma_head = rb_start + rb_head;
|
gma_head = rb_start + rb_head;
|
||||||
gma_tail = rb_start + rb_tail;
|
gma_tail = rb_start + rb_tail;
|
||||||
|
@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
|
||||||
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
|
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
|
||||||
if (!(s->ip_gma >= rb_start) ||
|
if (!(s->ip_gma >= rb_start) ||
|
||||||
!(s->ip_gma < gma_bottom)) {
|
!(s->ip_gma < gma_bottom)) {
|
||||||
gvt_err("ip_gma %lx out of ring scope."
|
gvt_vgpu_err("ip_gma %lx out of ring scope."
|
||||||
"(base:0x%lx, bottom: 0x%lx)\n",
|
"(base:0x%lx, bottom: 0x%lx)\n",
|
||||||
s->ip_gma, rb_start,
|
s->ip_gma, rb_start,
|
||||||
gma_bottom);
|
gma_bottom);
|
||||||
|
@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
|
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
|
||||||
gvt_err("ip_gma %lx out of range."
|
gvt_vgpu_err("ip_gma %lx out of range."
|
||||||
"base 0x%lx head 0x%lx tail 0x%lx\n",
|
"base 0x%lx head 0x%lx tail 0x%lx\n",
|
||||||
s->ip_gma, rb_start,
|
s->ip_gma, rb_start,
|
||||||
rb_head, rb_tail);
|
rb_head, rb_tail);
|
||||||
|
@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
|
||||||
}
|
}
|
||||||
ret = cmd_parser_exec(s);
|
ret = cmd_parser_exec(s);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("cmd parser error\n");
|
gvt_vgpu_err("cmd parser error\n");
|
||||||
parser_exec_state_dump(s);
|
parser_exec_state_dump(s);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
|
||||||
gma_head, gma_top,
|
gma_head, gma_top,
|
||||||
workload->shadow_ring_buffer_va);
|
workload->shadow_ring_buffer_va);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to copy guest ring buffer\n");
|
gvt_vgpu_err("fail to copy guest ring buffer\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
copy_len = gma_top - gma_head;
|
copy_len = gma_top - gma_head;
|
||||||
|
@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
|
||||||
gma_head, gma_tail,
|
gma_head, gma_tail,
|
||||||
workload->shadow_ring_buffer_va + copy_len);
|
workload->shadow_ring_buffer_va + copy_len);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to copy guest ring buffer\n");
|
gvt_vgpu_err("fail to copy guest ring buffer\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
ring->tail += workload->rb_len;
|
ring->tail += workload->rb_len;
|
||||||
|
@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
|
||||||
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
|
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
struct intel_vgpu *vgpu = workload->vgpu;
|
||||||
|
|
||||||
ret = shadow_workload_ring_buffer(workload);
|
ret = shadow_workload_ring_buffer(workload);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to shadow workload ring_buffer\n");
|
gvt_vgpu_err("fail to shadow workload ring_buffer\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = scan_workload(workload);
|
ret = scan_workload(workload);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("scan workload error\n");
|
gvt_vgpu_err("scan workload error\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
{
|
{
|
||||||
int ctx_size = wa_ctx->indirect_ctx.size;
|
int ctx_size = wa_ctx->indirect_ctx.size;
|
||||||
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
|
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
|
||||||
|
struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
|
||||||
struct drm_i915_gem_object *obj;
|
struct drm_i915_gem_object *obj;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
void *map;
|
void *map;
|
||||||
|
@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
/* get the va of the shadow batch buffer */
|
/* get the va of the shadow batch buffer */
|
||||||
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
|
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
|
||||||
if (IS_ERR(map)) {
|
if (IS_ERR(map)) {
|
||||||
gvt_err("failed to vmap shadow indirect ctx\n");
|
gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
|
||||||
ret = PTR_ERR(map);
|
ret = PTR_ERR(map);
|
||||||
goto put_obj;
|
goto put_obj;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = i915_gem_object_set_to_cpu_domain(obj, false);
|
ret = i915_gem_object_set_to_cpu_domain(obj, false);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("failed to set shadow indirect ctx to CPU\n");
|
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
|
||||||
goto unmap_src;
|
goto unmap_src;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
guest_gma, guest_gma + ctx_size,
|
guest_gma, guest_gma + ctx_size,
|
||||||
map);
|
map);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to copy guest indirect ctx\n");
|
gvt_vgpu_err("fail to copy guest indirect ctx\n");
|
||||||
goto unmap_src;
|
goto unmap_src;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
|
||||||
|
|
||||||
if (wa_ctx->indirect_ctx.size == 0)
|
if (wa_ctx->indirect_ctx.size == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
ret = shadow_indirect_ctx(wa_ctx);
|
ret = shadow_indirect_ctx(wa_ctx);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to shadow indirect ctx\n");
|
gvt_vgpu_err("fail to shadow indirect ctx\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
|
|
||||||
ret = scan_wa_ctx(wa_ctx);
|
ret = scan_wa_ctx(wa_ctx);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("scan wa ctx error\n");
|
gvt_vgpu_err("scan wa ctx error\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -27,6 +27,14 @@
|
||||||
#define gvt_err(fmt, args...) \
|
#define gvt_err(fmt, args...) \
|
||||||
DRM_ERROR("gvt: "fmt, ##args)
|
DRM_ERROR("gvt: "fmt, ##args)
|
||||||
|
|
||||||
|
#define gvt_vgpu_err(fmt, args...) \
|
||||||
|
do { \
|
||||||
|
if (IS_ERR_OR_NULL(vgpu)) \
|
||||||
|
DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
|
||||||
|
else \
|
||||||
|
DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
|
||||||
|
} while (0)
|
||||||
|
|
||||||
#define gvt_dbg_core(fmt, args...) \
|
#define gvt_dbg_core(fmt, args...) \
|
||||||
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
|
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
|
||||||
|
|
||||||
|
|
|
@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
|
||||||
unsigned char chr = 0;
|
unsigned char chr = 0;
|
||||||
|
|
||||||
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
|
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
|
||||||
gvt_err("Driver tries to read EDID without proper sequence!\n");
|
gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
if (edid->current_edid_read >= EDID_SIZE) {
|
if (edid->current_edid_read >= EDID_SIZE) {
|
||||||
gvt_err("edid_get_byte() exceeds the size of EDID!\n");
|
gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!edid->edid_available) {
|
if (!edid->edid_available) {
|
||||||
gvt_err("Reading EDID but EDID is not available!\n");
|
gvt_vgpu_err("Reading EDID but EDID is not available!\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
|
||||||
chr = edid_data->edid_block[edid->current_edid_read];
|
chr = edid_data->edid_block[edid->current_edid_read];
|
||||||
edid->current_edid_read++;
|
edid->current_edid_read++;
|
||||||
} else {
|
} else {
|
||||||
gvt_err("No EDID available during the reading?\n");
|
gvt_vgpu_err("No EDID available during the reading?\n");
|
||||||
}
|
}
|
||||||
return chr;
|
return chr;
|
||||||
}
|
}
|
||||||
|
@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||||
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
|
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
gvt_err("Unknown/reserved GMBUS cycle detected!\n");
|
gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
|
@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||||
*/
|
*/
|
||||||
} else {
|
} else {
|
||||||
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
|
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
|
||||||
gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
|
gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
|
||||||
vgpu->id);
|
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
|
||||||
struct intel_vgpu_execlist *execlist,
|
struct intel_vgpu_execlist *execlist,
|
||||||
struct execlist_ctx_descriptor_format *ctx)
|
struct execlist_ctx_descriptor_format *ctx)
|
||||||
{
|
{
|
||||||
|
struct intel_vgpu *vgpu = execlist->vgpu;
|
||||||
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
|
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
|
||||||
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
|
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
|
||||||
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
|
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
|
||||||
|
@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
|
||||||
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
|
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
|
||||||
|
|
||||||
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
|
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
|
||||||
gvt_err("schedule out context is not running context,"
|
gvt_vgpu_err("schedule out context is not running context,"
|
||||||
"ctx id %x running ctx id %x\n",
|
"ctx id %x running ctx id %x\n",
|
||||||
ctx->context_id,
|
ctx->context_id,
|
||||||
execlist->running_context->context_id);
|
execlist->running_context->context_id);
|
||||||
|
@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
|
||||||
status.udw = vgpu_vreg(vgpu, status_reg + 4);
|
status.udw = vgpu_vreg(vgpu, status_reg + 4);
|
||||||
|
|
||||||
if (status.execlist_queue_full) {
|
if (status.execlist_queue_full) {
|
||||||
gvt_err("virtual execlist slots are full\n");
|
gvt_vgpu_err("virtual execlist slots are full\n");
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
|
||||||
|
|
||||||
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
|
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
|
||||||
struct execlist_context_status_format status;
|
struct execlist_context_status_format status;
|
||||||
|
struct intel_vgpu *vgpu = execlist->vgpu;
|
||||||
|
|
||||||
gvt_dbg_el("emulate schedule-in\n");
|
gvt_dbg_el("emulate schedule-in\n");
|
||||||
|
|
||||||
if (!slot) {
|
if (!slot) {
|
||||||
gvt_err("no available execlist slot\n");
|
gvt_vgpu_err("no available execlist slot\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||||
|
|
||||||
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
|
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
|
||||||
if (IS_ERR(vma)) {
|
if (IS_ERR(vma)) {
|
||||||
gvt_err("Cannot pin\n");
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||||
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
|
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
|
||||||
0, CACHELINE_BYTES, 0);
|
0, CACHELINE_BYTES, 0);
|
||||||
if (IS_ERR(vma)) {
|
if (IS_ERR(vma)) {
|
||||||
gvt_err("Cannot pin indirect ctx obj\n");
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
|
||||||
{
|
{
|
||||||
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
|
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
|
||||||
struct intel_vgpu_mm *mm;
|
struct intel_vgpu_mm *mm;
|
||||||
|
struct intel_vgpu *vgpu = workload->vgpu;
|
||||||
int page_table_level;
|
int page_table_level;
|
||||||
u32 pdp[8];
|
u32 pdp[8];
|
||||||
|
|
||||||
|
@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
|
||||||
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
|
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
|
||||||
page_table_level = 4;
|
page_table_level = 4;
|
||||||
} else {
|
} else {
|
||||||
gvt_err("Advanced Context mode(SVM) is not supported!\n");
|
gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
|
||||||
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
|
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
|
||||||
pdp, page_table_level, 0);
|
pdp, page_table_level, 0);
|
||||||
if (IS_ERR(mm)) {
|
if (IS_ERR(mm)) {
|
||||||
gvt_err("fail to create mm object.\n");
|
gvt_vgpu_err("fail to create mm object.\n");
|
||||||
return PTR_ERR(mm);
|
return PTR_ERR(mm);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
|
||||||
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
|
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
|
||||||
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
|
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
|
||||||
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
|
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
|
||||||
gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
|
gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!desc[i]->privilege_access) {
|
if (!desc[i]->privilege_access) {
|
||||||
gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
|
gvt_vgpu_err("unexpected GGTT elsp submission\n");
|
||||||
vgpu->id);
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!valid_desc_bitmap) {
|
if (!valid_desc_bitmap) {
|
||||||
gvt_err("vgpu%d: no valid desc in a elsp submission\n",
|
gvt_vgpu_err("no valid desc in a elsp submission\n");
|
||||||
vgpu->id);
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
|
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
|
||||||
test_bit(1, (void *)&valid_desc_bitmap)) {
|
test_bit(1, (void *)&valid_desc_bitmap)) {
|
||||||
gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
|
gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
|
||||||
vgpu->id);
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||||
ret = submit_context(vgpu, ring_id, &valid_desc[i],
|
ret = submit_context(vgpu, ring_id, &valid_desc[i],
|
||||||
emulate_schedule_in);
|
emulate_schedule_in);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("vgpu%d: fail to schedule workload\n",
|
gvt_vgpu_err("fail to schedule workload\n");
|
||||||
vgpu->id);
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
emulate_schedule_in = false;
|
emulate_schedule_in = false;
|
||||||
|
|
|
@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
|
||||||
{
|
{
|
||||||
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
|
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
|
||||||
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
|
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
|
||||||
gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
|
gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
|
||||||
vgpu->id, addr, size);
|
addr, size);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
|
@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
|
||||||
|
|
||||||
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
|
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
|
||||||
if (mfn == INTEL_GVT_INVALID_ADDR) {
|
if (mfn == INTEL_GVT_INVALID_ADDR) {
|
||||||
gvt_err("fail to translate gfn: 0x%lx\n", gfn);
|
gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
|
||||||
|
|
||||||
daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
|
daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
|
||||||
if (dma_mapping_error(kdev, daddr)) {
|
if (dma_mapping_error(kdev, daddr)) {
|
||||||
gvt_err("fail to map dma addr\n");
|
gvt_vgpu_err("fail to map dma addr\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -735,7 +735,7 @@ retry:
|
||||||
if (reclaim_one_mm(vgpu->gvt))
|
if (reclaim_one_mm(vgpu->gvt))
|
||||||
goto retry;
|
goto retry;
|
||||||
|
|
||||||
gvt_err("fail to allocate ppgtt shadow page\n");
|
gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -750,14 +750,14 @@ retry:
|
||||||
*/
|
*/
|
||||||
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
|
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to initialize shadow page for spt\n");
|
gvt_vgpu_err("fail to initialize shadow page for spt\n");
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
|
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
|
||||||
gfn, ppgtt_write_protection_handler, NULL);
|
gfn, ppgtt_write_protection_handler, NULL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to initialize guest page for spt\n");
|
gvt_vgpu_err("fail to initialize guest page for spt\n");
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
|
||||||
if (p)
|
if (p)
|
||||||
return shadow_page_to_ppgtt_spt(p);
|
return shadow_page_to_ppgtt_spt(p);
|
||||||
|
|
||||||
gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
|
gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
|
||||||
vgpu->id, mfn);
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
|
||||||
}
|
}
|
||||||
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
|
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
|
||||||
if (!s) {
|
if (!s) {
|
||||||
gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
|
gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
|
||||||
vgpu->id, ops->get_pfn(e));
|
ops->get_pfn(e));
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
}
|
}
|
||||||
return ppgtt_invalidate_shadow_page(s);
|
return ppgtt_invalidate_shadow_page(s);
|
||||||
|
@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
|
||||||
|
|
||||||
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||||
{
|
{
|
||||||
|
struct intel_vgpu *vgpu = spt->vgpu;
|
||||||
struct intel_gvt_gtt_entry e;
|
struct intel_gvt_gtt_entry e;
|
||||||
unsigned long index;
|
unsigned long index;
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||||
|
|
||||||
for_each_present_shadow_entry(spt, &e, index) {
|
for_each_present_shadow_entry(spt, &e, index) {
|
||||||
if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
|
if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
|
||||||
gvt_err("GVT doesn't support pse bit for now\n");
|
gvt_vgpu_err("GVT doesn't support pse bit for now\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
|
ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
|
||||||
|
@ -868,8 +868,8 @@ release:
|
||||||
ppgtt_free_shadow_page(spt);
|
ppgtt_free_shadow_page(spt);
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
|
gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
|
||||||
spt->vgpu->id, spt, e.val64, e.type);
|
spt, e.val64, e.type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
|
||||||
}
|
}
|
||||||
return s;
|
return s;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
||||||
vgpu->id, s, we->val64, we->type);
|
s, we->val64, we->type);
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||||
|
|
||||||
for_each_present_guest_entry(spt, &ge, i) {
|
for_each_present_guest_entry(spt, &ge, i) {
|
||||||
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
|
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
|
||||||
gvt_err("GVT doesn't support pse bit now\n");
|
gvt_vgpu_err("GVT doesn't support pse bit now\n");
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
||||||
vgpu->id, spt, ge.val64, ge.type);
|
spt, ge.val64, ge.type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
||||||
struct intel_vgpu_ppgtt_spt *s =
|
struct intel_vgpu_ppgtt_spt *s =
|
||||||
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
|
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
|
||||||
if (!s) {
|
if (!s) {
|
||||||
gvt_err("fail to find guest page\n");
|
gvt_vgpu_err("fail to find guest page\n");
|
||||||
ret = -ENXIO;
|
ret = -ENXIO;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
||||||
ppgtt_set_shadow_entry(spt, &e, index);
|
ppgtt_set_shadow_entry(spt, &e, index);
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
||||||
vgpu->id, spt, e.val64, e.type);
|
spt, e.val64, e.type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
|
gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
|
||||||
spt, we->val64, we->type);
|
spt, we->val64, we->type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
|
||||||
vgpu->id, spt, we->val64, we->type);
|
spt, we->val64, we->type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
|
||||||
|
|
||||||
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
|
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
|
||||||
if (IS_ERR(spt)) {
|
if (IS_ERR(spt)) {
|
||||||
gvt_err("fail to populate guest root pointer\n");
|
gvt_vgpu_err("fail to populate guest root pointer\n");
|
||||||
ret = PTR_ERR(spt);
|
ret = PTR_ERR(spt);
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
|
||||||
|
|
||||||
ret = gtt->mm_alloc_page_table(mm);
|
ret = gtt->mm_alloc_page_table(mm);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("fail to allocate page table for mm\n");
|
gvt_vgpu_err("fail to allocate page table for mm\n");
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
|
||||||
}
|
}
|
||||||
return mm;
|
return mm;
|
||||||
fail:
|
fail:
|
||||||
gvt_err("fail to create mm\n");
|
gvt_vgpu_err("fail to create mm\n");
|
||||||
if (mm)
|
if (mm)
|
||||||
intel_gvt_mm_unreference(mm);
|
intel_gvt_mm_unreference(mm);
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
|
@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||||
mm->page_table_level, gma, gpa);
|
mm->page_table_level, gma, gpa);
|
||||||
return gpa;
|
return gpa;
|
||||||
err:
|
err:
|
||||||
gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
|
gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
|
||||||
return INTEL_GVT_INVALID_ADDR;
|
return INTEL_GVT_INVALID_ADDR;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
||||||
if (ops->test_present(&e)) {
|
if (ops->test_present(&e)) {
|
||||||
ret = gtt_entry_p2m(vgpu, &e, &m);
|
ret = gtt_entry_p2m(vgpu, &e, &m);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
gvt_err("vgpu%d: fail to translate guest gtt entry\n",
|
gvt_vgpu_err("fail to translate guest gtt entry\n");
|
||||||
vgpu->id);
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
|
||||||
|
|
||||||
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
|
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
|
||||||
if (!scratch_pt) {
|
if (!scratch_pt) {
|
||||||
gvt_err("fail to allocate scratch page\n");
|
gvt_vgpu_err("fail to allocate scratch page\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
|
daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
|
||||||
4096, PCI_DMA_BIDIRECTIONAL);
|
4096, PCI_DMA_BIDIRECTIONAL);
|
||||||
if (dma_mapping_error(dev, daddr)) {
|
if (dma_mapping_error(dev, daddr)) {
|
||||||
gvt_err("fail to dmamap scratch_pt\n");
|
gvt_vgpu_err("fail to dmamap scratch_pt\n");
|
||||||
__free_page(virt_to_page(scratch_pt));
|
__free_page(virt_to_page(scratch_pt));
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
|
||||||
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
|
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
|
||||||
NULL, 1, 0);
|
NULL, 1, 0);
|
||||||
if (IS_ERR(ggtt_mm)) {
|
if (IS_ERR(ggtt_mm)) {
|
||||||
gvt_err("fail to create mm for ggtt.\n");
|
gvt_vgpu_err("fail to create mm for ggtt.\n");
|
||||||
return PTR_ERR(ggtt_mm);
|
return PTR_ERR(ggtt_mm);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
|
||||||
for (i = 0; i < preallocated_oos_pages; i++) {
|
for (i = 0; i < preallocated_oos_pages; i++) {
|
||||||
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
|
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
|
||||||
if (!oos_page) {
|
if (!oos_page) {
|
||||||
gvt_err("fail to pre-allocate oos page\n");
|
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||||
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
|
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
|
||||||
pdp, page_table_level, 0);
|
pdp, page_table_level, 0);
|
||||||
if (IS_ERR(mm)) {
|
if (IS_ERR(mm)) {
|
||||||
gvt_err("fail to create mm\n");
|
gvt_vgpu_err("fail to create mm\n");
|
||||||
return PTR_ERR(mm);
|
return PTR_ERR(mm);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||||
|
|
||||||
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
|
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
|
||||||
if (!mm) {
|
if (!mm) {
|
||||||
gvt_err("fail to find ppgtt instance.\n");
|
gvt_vgpu_err("fail to find ppgtt instance.\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
intel_gvt_mm_unreference(mm);
|
intel_gvt_mm_unreference(mm);
|
||||||
|
|
|
@ -162,7 +162,6 @@ struct intel_vgpu {
|
||||||
atomic_t running_workload_num;
|
atomic_t running_workload_num;
|
||||||
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
|
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
|
||||||
struct i915_gem_context *shadow_ctx;
|
struct i915_gem_context *shadow_ctx;
|
||||||
struct notifier_block shadow_ctx_notifier_block;
|
|
||||||
|
|
||||||
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
|
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
|
||||||
struct {
|
struct {
|
||||||
|
@ -233,6 +232,7 @@ struct intel_gvt {
|
||||||
struct intel_gvt_gtt gtt;
|
struct intel_gvt_gtt gtt;
|
||||||
struct intel_gvt_opregion opregion;
|
struct intel_gvt_opregion opregion;
|
||||||
struct intel_gvt_workload_scheduler scheduler;
|
struct intel_gvt_workload_scheduler scheduler;
|
||||||
|
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
|
||||||
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
|
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
|
||||||
struct intel_vgpu_type *types;
|
struct intel_vgpu_type *types;
|
||||||
unsigned int num_types;
|
unsigned int num_types;
|
||||||
|
|
|
@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
|
||||||
GVT_FAILSAFE_UNSUPPORTED_GUEST);
|
GVT_FAILSAFE_UNSUPPORTED_GUEST);
|
||||||
|
|
||||||
if (!vgpu->mmio.disable_warn_untrack) {
|
if (!vgpu->mmio.disable_warn_untrack) {
|
||||||
gvt_err("vgpu%d: found oob fence register access\n",
|
gvt_vgpu_err("found oob fence register access\n");
|
||||||
vgpu->id);
|
gvt_vgpu_err("total fence %d, access fence %d\n",
|
||||||
gvt_err("vgpu%d: total fence %d, access fence %d\n",
|
vgpu_fence_sz(vgpu), fence_num);
|
||||||
vgpu->id, vgpu_fence_sz(vgpu),
|
|
||||||
fence_num);
|
|
||||||
}
|
}
|
||||||
memset(p_data, 0, bytes);
|
memset(p_data, 0, bytes);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
/*should not hit here*/
|
/*should not hit here*/
|
||||||
gvt_err("invalid forcewake offset 0x%x\n", offset);
|
gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
|
||||||
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
|
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
|
||||||
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
|
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
|
||||||
} else {
|
} else {
|
||||||
gvt_err("Invalid train pattern %d\n", train_pattern);
|
gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
|
||||||
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
|
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
|
||||||
index = FDI_RX_IMR_TO_PIPE(offset);
|
index = FDI_RX_IMR_TO_PIPE(offset);
|
||||||
else {
|
else {
|
||||||
gvt_err("Unsupport registers %x\n", offset);
|
gvt_vgpu_err("Unsupport registers %x\n", offset);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
|
||||||
u32 data;
|
u32 data;
|
||||||
|
|
||||||
if (!dpy_is_valid_port(port_index)) {
|
if (!dpy_is_valid_port(port_index)) {
|
||||||
gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
|
gvt_vgpu_err("Unsupported DP port access!\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
|
||||||
|
|
||||||
if (i == num) {
|
if (i == num) {
|
||||||
if (num == SBI_REG_MAX) {
|
if (num == SBI_REG_MAX) {
|
||||||
gvt_err("vgpu%d: SBI caching meets maximum limits\n",
|
gvt_vgpu_err("SBI caching meets maximum limits\n");
|
||||||
vgpu->id);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
display->sbi.number++;
|
display->sbi.number++;
|
||||||
|
@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (invalid_read)
|
if (invalid_read)
|
||||||
gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
|
gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
|
||||||
offset, bytes, *(u32 *)p_data);
|
offset, bytes, *(u32 *)p_data);
|
||||||
vgpu->pv_notified = true;
|
vgpu->pv_notified = true;
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
|
||||||
case 1: /* Remove this in guest driver. */
|
case 1: /* Remove this in guest driver. */
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
gvt_err("Invalid PV notification %d\n", notification);
|
gvt_vgpu_err("Invalid PV notification %d\n", notification);
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||||
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
|
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
|
gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
|
||||||
offset, bytes, data);
|
offset, bytes, data);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||||
if (execlist->elsp_dwords.index == 3) {
|
if (execlist->elsp_dwords.index == 3) {
|
||||||
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
|
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
|
||||||
if(ret)
|
if(ret)
|
||||||
gvt_err("fail submit workload on ring %d\n", ring_id);
|
gvt_vgpu_err("fail submit workload on ring %d\n",
|
||||||
|
ring_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
++execlist->elsp_dwords.index;
|
++execlist->elsp_dwords.index;
|
||||||
|
@ -2988,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||||
write_vreg(vgpu, offset, p_data, bytes);
|
write_vreg(vgpu, offset, p_data, bytes);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
|
||||||
|
* force-nopriv register
|
||||||
|
*
|
||||||
|
* @gvt: a GVT device
|
||||||
|
* @offset: register offset
|
||||||
|
*
|
||||||
|
* Returns:
|
||||||
|
* True if the register is in force-nonpriv whitelist;
|
||||||
|
* False if outside;
|
||||||
|
*/
|
||||||
|
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
|
||||||
|
unsigned int offset)
|
||||||
|
{
|
||||||
|
return in_whitelist(offset);
|
||||||
|
}
|
||||||
|
|
|
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,

 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 {
-    struct intel_vgpu *vgpu;
+    struct intel_vgpu *vgpu = NULL;
     struct intel_vgpu_type *type;
     struct device *pdev;
     void *gvt;

@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)

     type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
     if (!type) {
-        gvt_err("failed to find type %s to create\n",
+        gvt_vgpu_err("failed to find type %s to create\n",
                 kobject_name(kobj));
         ret = -EINVAL;
         goto out;

@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
     vgpu = intel_gvt_ops->vgpu_create(gvt, type);
     if (IS_ERR_OR_NULL(vgpu)) {
         ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
-        gvt_err("failed to create intel vgpu: %d\n", ret);
+        gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
         goto out;
     }

@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
     ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
                 &vgpu->vdev.iommu_notifier);
     if (ret != 0) {
-        gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+        gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
+            ret);
         goto out;
     }

@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
     ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
                 &vgpu->vdev.group_notifier);
     if (ret != 0) {
-        gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+        gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
+            ret);
         goto undo_iommu;
     }

@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,


     if (index >= VFIO_PCI_NUM_REGIONS) {
-        gvt_err("invalid index: %u\n", index);
+        gvt_vgpu_err("invalid index: %u\n", index);
         return -EINVAL;
     }

@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
     case VFIO_PCI_VGA_REGION_INDEX:
     case VFIO_PCI_ROM_REGION_INDEX:
     default:
-        gvt_err("unsupported region: %u\n", index);
+        gvt_vgpu_err("unsupported region: %u\n", index);
     }

     return ret == 0 ? count : ret;

@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,

     trigger = eventfd_ctx_fdget(fd);
     if (IS_ERR(trigger)) {
-        gvt_err("eventfd_ctx_fdget failed\n");
+        gvt_vgpu_err("eventfd_ctx_fdget failed\n");
         return PTR_ERR(trigger);
     }
     vgpu->vdev.msi_trigger = trigger;

@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
         ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
                     VFIO_PCI_NUM_IRQS, &data_size);
         if (ret) {
-            gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+            gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
             return -EINVAL;
         }
         if (data_size) {

@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)

     kvm = vgpu->vdev.kvm;
     if (!kvm || kvm->mm != current->mm) {
-        gvt_err("KVM is required to use Intel vGPU\n");
+        gvt_vgpu_err("KVM is required to use Intel vGPU\n");
         return -ESRCH;
     }

@@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev)

 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
+    struct intel_vgpu *vgpu = info->vgpu;
+
     if (!info) {
-        gvt_err("kvmgt_guest_info invalid\n");
+        gvt_vgpu_err("kvmgt_guest_info invalid\n");
         return false;
     }

@@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
     unsigned long iova, pfn;
     struct kvmgt_guest_info *info;
     struct device *dev;
+    struct intel_vgpu *vgpu;
     int rc;

     if (!handle_valid(handle))
         return INTEL_GVT_INVALID_ADDR;

     info = (struct kvmgt_guest_info *)handle;
+    vgpu = info->vgpu;
     iova = gvt_cache_find(info->vgpu, gfn);
     if (iova != INTEL_GVT_INVALID_ADDR)
         return iova;

@@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
     dev = mdev_dev(info->vgpu->vdev.mdev);
     rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
     if (rc != 1) {
-        gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+        gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+            gfn, rc);
         return INTEL_GVT_INVALID_ADDR;
     }
     /* transfer to host iova for GFX to use DMA */
     rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
     if (rc) {
-        gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+        gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
         vfio_unpin_pages(dev, &gfn, 1);
         return INTEL_GVT_INVALID_ADDR;
     }

@@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 {
     struct kvmgt_guest_info *info;
     struct kvm *kvm;
-    int ret;
+    int idx, ret;
     bool kthread = current->mm == NULL;

     if (!handle_valid(handle))

@@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
     if (kthread)
         use_mm(kvm->mm);

+    idx = srcu_read_lock(&kvm->srcu);
     ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
             kvm_read_guest(kvm, gpa, buf, len);
+    srcu_read_unlock(&kvm->srcu, idx);

     if (kthread)
         unuse_mm(kvm->mm);
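
As background for the kvmgt_rw_gpa() hunk just above, here is a minimal, illustrative sketch of the SRCU read-side pattern it adopts: the KVM memslot array is protected by kvm->srcu, so guest reads and writes are wrapped in srcu_read_lock()/srcu_read_unlock(). The helper name below is invented for the example; only the locking pattern mirrors the patch.

/* Illustrative only: guard a guest-physical access with kvm->srcu. */
static int example_rw_gpa(struct kvm *kvm, gpa_t gpa, void *buf,
                          unsigned long len, bool write)
{
        int idx, ret;

        idx = srcu_read_lock(&kvm->srcu);       /* memslots may change under us */
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        srcu_read_unlock(&kvm->srcu, idx);

        return ret;
}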
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
         ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                 p_data, bytes);
         if (ret) {
-            gvt_err("vgpu%d: guest page read error %d, "
+            gvt_vgpu_err("guest page read error %d, "
                 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                vgpu->id, ret,
-                gp->gfn, pa, *(u32 *)p_data, bytes);
+                ret, gp->gfn, pa, *(u32 *)p_data,
+                bytes);
         }
         mutex_unlock(&gvt->lock);
         return ret;

@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
         ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

         if (!vgpu->mmio.disable_warn_untrack) {
-            gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
-                vgpu->id, offset, bytes, *(u32 *)p_data);
+            gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
+                offset, bytes, *(u32 *)p_data);

             if (offset == 0x206c) {
-                gvt_err("------------------------------------------\n");
-                gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                    vgpu->id);
-                gvt_err("------------------------------------------\n");
+                gvt_vgpu_err("------------------------------------------\n");
+                gvt_vgpu_err("likely triggers a gfx reset\n");
+                gvt_vgpu_err("------------------------------------------\n");
                 vgpu->mmio.disable_warn_untrack = true;
             }
         }

@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
     mutex_unlock(&gvt->lock);
     return 0;
 err:
-    gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
-            vgpu->id, offset, bytes);
+    gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
+            offset, bytes);
     mutex_unlock(&gvt->lock);
     return ret;
 }

@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
     if (gp) {
         ret = gp->handler(gp, pa, p_data, bytes);
         if (ret) {
-            gvt_err("vgpu%d: guest page write error %d, "
-                "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                vgpu->id, ret,
-                gp->gfn, pa, *(u32 *)p_data, bytes);
+            gvt_err("guest page write error %d, "
+                "gfn 0x%lx, pa 0x%llx, "
+                "var 0x%x, len %d\n",
+                ret, gp->gfn, pa,
+                *(u32 *)p_data, bytes);
         }
         mutex_unlock(&gvt->lock);
         return ret;

@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,

     /* all register bits are RO. */
     if (ro_mask == ~(u64)0) {
-        gvt_err("vgpu%d: try to write RO reg %x\n",
-                vgpu->id, offset);
+        gvt_vgpu_err("try to write RO reg %x\n",
+                offset);
         ret = 0;
         goto out;
     }

@@ -360,8 +360,8 @@ out:
     mutex_unlock(&gvt->lock);
     return 0;
 err:
-    gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
-            vgpu->id, offset, bytes);
+    gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
+            bytes);
     mutex_unlock(&gvt->lock);
     return ret;
 }

@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes);
 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes);
+
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+        unsigned int offset);
 #endif
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
         mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
             + i * PAGE_SIZE);
         if (mfn == INTEL_GVT_INVALID_ADDR) {
-            gvt_err("fail to get MFN from VA\n");
+            gvt_vgpu_err("fail to get MFN from VA\n");
             return -EINVAL;
         }
         ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
                 vgpu_opregion(vgpu)->gfn[i],
                 mfn, 1, map);
         if (ret) {
-            gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+            gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
+                ret);
             return ret;
         }
     }

@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
     parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;

     if (!(swsci & SWSCI_SCI_SELECT)) {
-        gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+        gvt_vgpu_err("requesting SMI service\n");
         return 0;
     }
     /* ignore non 0->1 trasitions */

@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
     func = GVT_OPREGION_FUNC(*scic);
     subfunc = GVT_OPREGION_SUBFUNC(*scic);
     if (!querying_capabilities(*scic)) {
-        gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+        gvt_vgpu_err("requesting runtime service: func \"%s\","
                 " subfunc \"%s\"\n",
-                vgpu->id,
                 opregion_func_name(func),
                 opregion_subfunc_name(subfunc));
         /*

@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
     I915_WRITE_FW(reg, 0x1);

     if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
-        gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+        gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
     else
         vgpu_vreg(vgpu, regs[ring_id]) = 0;

@@ -101,7 +101,7 @@ struct tbs_sched_data {
     struct list_head runq_head;
 };

-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))

 static void tbs_sched_func(struct work_struct *work)
 {

@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
         return;

     list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-    schedule_delayed_work(&sched_data->work, sched_data->period);
+    schedule_delayed_work(&sched_data->work, 0);
 }

 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
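
The two scheduler-policy hunks above move the time-based scheduler onto plain delayed work: the period is expressed with msecs_to_jiffies() and the first pass is queued with a delay of 0. A minimal, self-contained sketch of that pattern follows; the work item and callback names are made up for illustration.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_tick(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_work, example_tick);

static void example_tick(struct work_struct *work)
{
        /* do one scheduling pass, then re-arm roughly 1 ms from now */
        schedule_delayed_work(&example_work, msecs_to_jiffies(1));
}

static void example_start(void)
{
        /* run the first pass as soon as possible, like the delay of 0 above */
        schedule_delayed_work(&example_work, 0);
}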
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
             (u32)((workload->ctx_desc.lrca + i) <<
                 GTT_PAGE_SHIFT));
         if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-            gvt_err("Invalid guest context descriptor\n");
+            gvt_vgpu_err("Invalid guest context descriptor\n");
             return -EINVAL;
         }

@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 static int shadow_context_status_change(struct notifier_block *nb,
         unsigned long action, void *data)
 {
-    struct intel_vgpu *vgpu = container_of(nb,
-            struct intel_vgpu, shadow_ctx_notifier_block);
-    struct drm_i915_gem_request *req =
-        (struct drm_i915_gem_request *)data;
-    struct intel_gvt_workload_scheduler *scheduler =
-        &vgpu->gvt->scheduler;
+    struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+    struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+                shadow_ctx_notifier_block[req->engine->id]);
+    struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
     struct intel_vgpu_workload *workload =
         scheduler->current_workload[req->engine->id];

@@ -175,7 +173,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
     int ring_id = workload->ring_id;
     struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
     struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+    struct intel_engine_cs *engine = dev_priv->engine[ring_id];
     struct drm_i915_gem_request *rq;
+    struct intel_vgpu *vgpu = workload->vgpu;
     int ret;

     gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",

@@ -187,9 +187,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)

     mutex_lock(&dev_priv->drm.struct_mutex);

+    /* pin shadow context by gvt even the shadow context will be pinned
+     * when i915 alloc request. That is because gvt will update the guest
+     * context from shadow context when workload is completed, and at that
+     * moment, i915 may already unpined the shadow context to make the
+     * shadow_ctx pages invalid. So gvt need to pin itself. After update
+     * the guest context, gvt can unpin the shadow_ctx safely.
+     */
+    ret = engine->context_pin(engine, shadow_ctx);
+    if (ret) {
+        gvt_vgpu_err("fail to pin shadow context\n");
+        workload->status = ret;
+        mutex_unlock(&dev_priv->drm.struct_mutex);
+        return ret;
+    }
+
     rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
     if (IS_ERR(rq)) {
-        gvt_err("fail to allocate gem request\n");
+        gvt_vgpu_err("fail to allocate gem request\n");
         ret = PTR_ERR(rq);
         goto out;
     }

@@ -202,9 +217,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
     if (ret)
         goto out;

-    ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
-    if (ret)
-        goto out;
+    if ((workload->ring_id == RCS) &&
+        (workload->wa_ctx.indirect_ctx.size != 0)) {
+        ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+        if (ret)
+            goto out;
+    }

     ret = populate_shadow_context(workload);
     if (ret)

@@ -227,6 +245,9 @@ out:

     if (!IS_ERR_OR_NULL(rq))
         i915_add_request_no_flush(rq);
+    else
+        engine->context_unpin(engine, shadow_ctx);

     mutex_unlock(&dev_priv->drm.struct_mutex);
     return ret;
 }

@@ -322,7 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
             (u32)((workload->ctx_desc.lrca + i) <<
                 GTT_PAGE_SHIFT));
         if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-            gvt_err("invalid guest context descriptor\n");
+            gvt_vgpu_err("invalid guest context descriptor\n");
             return;
         }

@@ -376,6 +397,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
      * For the workload w/o request, directly complete the workload.
      */
     if (workload->req) {
+        struct drm_i915_private *dev_priv =
+            workload->vgpu->gvt->dev_priv;
+        struct intel_engine_cs *engine =
+            dev_priv->engine[workload->ring_id];
         wait_event(workload->shadow_ctx_status_wq,
                !atomic_read(&workload->shadow_ctx_active));

@@ -388,6 +413,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                     INTEL_GVT_EVENT_MAX)
                 intel_vgpu_trigger_virtual_event(vgpu, event);
         }
+        mutex_lock(&dev_priv->drm.struct_mutex);
+        /* unpin shadow ctx as the shadow_ctx update is done */
+        engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+        mutex_unlock(&dev_priv->drm.struct_mutex);
     }

     gvt_dbg_sched("ring id %d complete workload %p status %d\n",

@@ -417,6 +446,7 @@ static int workload_thread(void *priv)
     int ring_id = p->ring_id;
     struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
     struct intel_vgpu_workload *workload = NULL;
+    struct intel_vgpu *vgpu = NULL;
     int ret;
     bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
     DEFINE_WAIT_FUNC(wait, woken_wake_function);

@@ -459,25 +489,14 @@ static int workload_thread(void *priv)
         mutex_unlock(&gvt->lock);

         if (ret) {
-            gvt_err("fail to dispatch workload, skip\n");
+            vgpu = workload->vgpu;
+            gvt_vgpu_err("fail to dispatch workload, skip\n");
             goto complete;
         }

         gvt_dbg_sched("ring id %d wait workload %p\n",
                 workload->ring_id, workload);
-retry:
-        i915_wait_request(workload->req,
-                0, MAX_SCHEDULE_TIMEOUT);
-        /* I915 has replay mechanism and a request will be replayed
-         * if there is i915 reset. So the seqno will be updated anyway.
-         * If the seqno is not updated yet after waiting, which means
-         * the replay may still be in progress and we can wait again.
-         */
-        if (!i915_gem_request_completed(workload->req)) {
-            gvt_dbg_sched("workload %p not completed, wait again\n",
-                    workload);
-            goto retry;
-        }
+        i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

 complete:
         gvt_dbg_sched("will complete workload %p, status: %d\n",

@@ -513,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
     struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-    int i;
+    struct intel_engine_cs *engine;
+    enum intel_engine_id i;

     gvt_dbg_core("clean workload scheduler\n");

-    for (i = 0; i < I915_NUM_ENGINES; i++) {
-        if (scheduler->thread[i]) {
-            kthread_stop(scheduler->thread[i]);
-            scheduler->thread[i] = NULL;
-        }
+    for_each_engine(engine, gvt->dev_priv, i) {
+        atomic_notifier_chain_unregister(
+                &engine->context_status_notifier,
+                &gvt->shadow_ctx_notifier_block[i]);
+        kthread_stop(scheduler->thread[i]);
     }
 }

@@ -529,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
     struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
     struct workload_thread_param *param = NULL;
+    struct intel_engine_cs *engine;
+    enum intel_engine_id i;
     int ret;
-    int i;

     gvt_dbg_core("init workload scheduler\n");

     init_waitqueue_head(&scheduler->workload_complete_wq);

-    for (i = 0; i < I915_NUM_ENGINES; i++) {
-        /* check ring mask at init time */
-        if (!HAS_ENGINE(gvt->dev_priv, i))
-            continue;
-
+    for_each_engine(engine, gvt->dev_priv, i) {
         init_waitqueue_head(&scheduler->waitq[i]);

         param = kzalloc(sizeof(*param), GFP_KERNEL);

@@ -559,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
             ret = PTR_ERR(scheduler->thread[i]);
             goto err;
         }
+
+        gvt->shadow_ctx_notifier_block[i].notifier_call =
+                    shadow_context_status_change;
+        atomic_notifier_chain_register(&engine->context_status_notifier,
+                    &gvt->shadow_ctx_notifier_block[i]);
     }
     return 0;
 err:

@@ -570,9 +592,6 @@ err:

 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-    atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-            &vgpu->shadow_ctx_notifier_block);
-
     i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }

@@ -587,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)

     vgpu->shadow_ctx->engine[RCS].initialised = true;

-    vgpu->shadow_ctx_notifier_block.notifier_call =
-        shadow_context_status_change;
-
-    atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-            &vgpu->shadow_ctx_notifier_block);
     return 0;
 }
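
The scheduler hunks above move GVT's shadow-context callback from a per-context notifier head to a per-engine one, built on the kernel's atomic notifier chains. For reference, a minimal sketch of that API follows; every example_* name is a placeholder, not code from the patch.

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_cb(struct notifier_block *nb, unsigned long action,
                      void *data)
{
        /* 'action' and 'data' are whatever the caller passes to the chain */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_cb,
};

static void example_use(void *payload)
{
        atomic_notifier_chain_register(&example_chain, &example_nb);
        atomic_notifier_call_chain(&example_chain, 0 /* action */, payload);
        atomic_notifier_chain_unregister(&example_chain, &example_nb);
}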
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
     case I915_PARAM_IRQ_ACTIVE:
     case I915_PARAM_ALLOW_BATCHBUFFER:
     case I915_PARAM_LAST_DISPATCH:
+    case I915_PARAM_HAS_EXEC_CONSTANTS:
         /* Reject all old ums/dri params. */
         return -ENODEV;
     case I915_PARAM_CHIPSET_ID:

@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
     case I915_PARAM_HAS_BSD2:
         value = !!dev_priv->engine[VCS2];
         break;
-    case I915_PARAM_HAS_EXEC_CONSTANTS:
-        value = INTEL_GEN(dev_priv) >= 4;
-        break;
     case I915_PARAM_HAS_LLC:
         value = HAS_LLC(dev_priv);
         break;

@@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
         goto error;
     }

-    i915_gem_reset_finish(dev_priv);
+    i915_gem_reset(dev_priv);
     intel_overlay_reset(dev_priv);

     /* Ok, now get things going again... */

@@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
     i915_queue_hangcheck(dev_priv);

 wakeup:
+    i915_gem_reset_finish(dev_priv);
     enable_irq(dev_priv->drm.irq);
     wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
     return;

@@ -1325,7 +1325,7 @@ struct intel_gen6_power_mgmt {
     unsigned boosts;

     /* manual wa residency calculations */
-    struct intel_rps_ei up_ei, down_ei;
+    struct intel_rps_ei ei;

     /*
      * Protects RPS/RC6 register access and PCU communication.

@@ -2064,8 +2064,6 @@ struct drm_i915_private {

     const struct intel_device_info info;

-    int relative_constants_mode;
-
     void __iomem *regs;

     struct intel_uncore uncore;

@@ -3342,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 }

 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
@@ -2719,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
     for_each_engine(engine, dev_priv, id) {
         struct drm_i915_gem_request *request;

+        /* Prevent request submission to the hardware until we have
+         * completed the reset in i915_gem_reset_finish(). If a request
+         * is completed by one engine, it may then queue a request
+         * to a second via its engine->irq_tasklet *just* as we are
+         * calling engine->init_hw() and also writing the ELSP.
+         * Turning off the engine->irq_tasklet until the reset is over
+         * prevents the race.
+         */
         tasklet_kill(&engine->irq_tasklet);
+        tasklet_disable(&engine->irq_tasklet);

         if (engine_stalled(engine)) {
             request = i915_gem_find_active_request(engine);

@@ -2834,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
         engine->reset_hw(engine, request);
 }

-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
     struct intel_engine_cs *engine;
     enum intel_engine_id id;

@@ -2856,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
     }
 }

+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+{
+    struct intel_engine_cs *engine;
+    enum intel_engine_id id;
+
+    lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+    for_each_engine(engine, dev_priv, id)
+        tasklet_enable(&engine->irq_tasklet);
+}
+
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
     dma_fence_set_error(&request->fence, -EIO);
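
The reset-phase hunks above bracket the hardware reset with tasklet_disable()/tasklet_enable() so the engine's bottom half cannot submit while the reset is in flight. Below is a minimal sketch of that pairing, written against the pre-5.9 tasklet API this kernel uses, with an invented tasklet and handler.

#include <linux/interrupt.h>

static void example_handler(unsigned long data)
{
        /* bottom-half work would run here */
}

static struct tasklet_struct example_tasklet;

static void example_guarded_reset(void)
{
        tasklet_init(&example_tasklet, example_handler, 0);

        tasklet_disable(&example_tasklet);      /* also waits out a running handler */
        /* ... touch the hardware while the bottom half cannot run ... */
        tasklet_enable(&example_tasklet);
}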
@@ -4674,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
     init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
     init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

-    dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
     init_waitqueue_head(&dev_priv->pending_flip_queue);

     dev_priv->mm.interruptible = true;
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
     ctx->ring_size = 4 * PAGE_SIZE;
     ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
                  GEN8_CTX_ADDRESSING_MODE_SHIFT;
-    ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

     /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
      * present or not in use we still need a small bias as ring wraparound

@@ -160,9 +160,6 @@ struct i915_gem_context {
     /** desc_template: invariant fields for the HW context descriptor */
     u32 desc_template;

-    /** status_notifier: list of callbacks for context-switch changes */
-    struct atomic_notifier_head status_notifier;
-
     /** guilty_count: How many times this context has caused a GPU hang. */
     unsigned int guilty_count;
     /**
@@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
            struct drm_i915_gem_execbuffer2 *args,
            struct list_head *vmas)
 {
-    struct drm_i915_private *dev_priv = params->request->i915;
     u64 exec_start, exec_len;
-    int instp_mode;
-    u32 instp_mask;
     int ret;

     ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);

@@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
     if (ret)
         return ret;

-    instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-    instp_mask = I915_EXEC_CONSTANTS_MASK;
-    switch (instp_mode) {
-    case I915_EXEC_CONSTANTS_REL_GENERAL:
-    case I915_EXEC_CONSTANTS_ABSOLUTE:
-    case I915_EXEC_CONSTANTS_REL_SURFACE:
-        if (instp_mode != 0 && params->engine->id != RCS) {
-            DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-            return -EINVAL;
-        }
-
-        if (instp_mode != dev_priv->relative_constants_mode) {
-            if (INTEL_INFO(dev_priv)->gen < 4) {
-                DRM_DEBUG("no rel constants on pre-gen4\n");
-                return -EINVAL;
-            }
-
-            if (INTEL_INFO(dev_priv)->gen > 5 &&
-                instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-                return -EINVAL;
-            }
-
-            /* The HW changed the meaning on this bit on gen6 */
-            if (INTEL_INFO(dev_priv)->gen >= 6)
-                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-        }
-        break;
-    default:
-        DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+    if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+        DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
         return -EINVAL;
     }

-    if (params->engine->id == RCS &&
-        instp_mode != dev_priv->relative_constants_mode) {
-        struct intel_ring *ring = params->request->ring;
-
-        ret = intel_ring_begin(params->request, 4);
-        if (ret)
-            return ret;
-
-        intel_ring_emit(ring, MI_NOOP);
-        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-        intel_ring_emit_reg(ring, INSTPM);
-        intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-        intel_ring_advance(ring);
-
-        dev_priv->relative_constants_mode = instp_mode;
-    }
-
     if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
         ret = i915_reset_gen7_sol_offsets(params->request);
         if (ret)

@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                 I915_SHRINK_BOUND |
                 I915_SHRINK_UNBOUND |
                 I915_SHRINK_ACTIVE);
-    rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+    synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */

     return freed;
 }
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
     ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
 }

-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
-             const struct intel_rps_ei *old,
-             const struct intel_rps_ei *now,
-             int threshold)
-{
-    u64 time, c0;
-    unsigned int mul = 100;
-
-    if (old->cz_clock == 0)
-        return false;
-
-    if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
-        mul <<= 8;
-
-    time = now->cz_clock - old->cz_clock;
-    time *= threshold * dev_priv->czclk_freq;
-
-    /* Workload can be split between render + media, e.g. SwapBuffers
-     * being blitted in X after being rendered in mesa. To account for
-     * this we need to combine both engines into our activity counter.
-     */
-    c0 = now->render_c0 - old->render_c0;
-    c0 += now->media_c0 - old->media_c0;
-    c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
-    return c0 >= time;
-}
-
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-    vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
-    dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+    memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
 }

 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+    const struct intel_rps_ei *prev = &dev_priv->rps.ei;
     struct intel_rps_ei now;
     u32 events = 0;

-    if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+    if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
         return 0;

     vlv_c0_read(dev_priv, &now);
     if (now.cz_clock == 0)
         return 0;

-    if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
-        if (!vlv_c0_above(dev_priv,
-                  &dev_priv->rps.down_ei, &now,
-                  dev_priv->rps.down_threshold))
-            events |= GEN6_PM_RP_DOWN_THRESHOLD;
-        dev_priv->rps.down_ei = now;
-    }
-
-    if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
-        if (vlv_c0_above(dev_priv,
-                 &dev_priv->rps.up_ei, &now,
-                 dev_priv->rps.up_threshold))
-            events |= GEN6_PM_RP_UP_THRESHOLD;
-        dev_priv->rps.up_ei = now;
+    if (prev->cz_clock) {
+        u64 time, c0;
+        unsigned int mul;
+
+        mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+        if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+            mul <<= 8;
+
+        time = now.cz_clock - prev->cz_clock;
+        time *= dev_priv->czclk_freq;
+
+        /* Workload can be split between render + media,
+         * e.g. SwapBuffers being blitted in X after being rendered in
+         * mesa. To account for this we need to combine both engines
+         * into our activity counter.
+         */
+        c0 = now.render_c0 - prev->render_c0;
+        c0 += now.media_c0 - prev->media_c0;
+        c0 *= mul;
+
+        if (c0 > time * dev_priv->rps.up_threshold)
+            events = GEN6_PM_RP_UP_THRESHOLD;
+        else if (c0 < time * dev_priv->rps.down_threshold)
+            events = GEN6_PM_RP_DOWN_THRESHOLD;
     }

+    dev_priv->rps.ei = now;
     return events;
 }

@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
     /* Let's track the enabled rps events */
     if (IS_VALLEYVIEW(dev_priv))
         /* WaGsvRC0ResidencyMethod:vlv */
-        dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+        dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
     else
         dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
     if (!IS_GEN2(dev_priv))
         dev->vblank_disable_immediate = true;

+    /* Most platforms treat the display irq block as an always-on
+     * power domain. vlv/chv can disable it at runtime and need
+     * special care to avoid writing any of the display block registers
+     * outside of the power domain. We defer setting up the display irqs
+     * in this case to the runtime pm.
+     */
+    dev_priv->display_irqs_enabled = true;
+    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+        dev_priv->display_irqs_enabled = false;
+
     dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
     dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
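
To make the reworked EI bookkeeping above concrete: combined render+media busy counts are scaled and compared against the elapsed time multiplied by the up/down thresholds (percentages). The sketch below only mirrors that shape of comparison; the names and values are invented and it is not the driver code.

#include <stdint.h>

enum example_rps_event { EXAMPLE_RPS_NONE, EXAMPLE_RPS_UP, EXAMPLE_RPS_DOWN };

static enum example_rps_event example_classify(uint64_t busy_c0,      /* scaled busy counts */
                                               uint64_t elapsed_time, /* scaled elapsed time */
                                               unsigned int up_threshold,   /* percent */
                                               unsigned int down_threshold) /* percent */
{
        if (busy_c0 > elapsed_time * up_threshold)
                return EXAMPLE_RPS_UP;
        if (busy_c0 < elapsed_time * down_threshold)
                return EXAMPLE_RPS_DOWN;
        return EXAMPLE_RPS_NONE;
}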
@@ -35,7 +35,6 @@
  */

 #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
 #define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)

 #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
         }
     }

-    intel_update_czclk(dev_priv);
-    intel_update_cdclk(dev_priv);
-    dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
     intel_shared_dpll_init(dev);

+    intel_update_czclk(dev_priv);
+    intel_modeset_init_hw(dev);
+
     if (dev_priv->max_cdclk_freq == 0)
         intel_update_max_cdclk(dev_priv);

@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)

     intel_init_gt_powersave(dev_priv);

-    intel_modeset_init_hw(dev);
-
     intel_setup_overlay(dev_priv);
 }
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
     /* Nothing to do here, execute in order of dependencies */
     engine->schedule = NULL;

+    ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
     dev_priv->engine[id] = engine;
     return 0;
 }

@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
         goto bail;
     }

+    if (!i915.enable_execlists) {
+        DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
+        goto bail;
+    }
+
     /*
      * We're not in host or fail to find a MPT module, disable GVT-g
      */
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,

 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
-    struct drm_device *dev = crtc_state->base.crtc->dev;
+    struct drm_i915_private *dev_priv =
+        to_i915(crtc_state->base.crtc->dev);
+    struct drm_atomic_state *state = crtc_state->base.state;
+    struct drm_connector_state *connector_state;
+    struct drm_connector *connector;
+    int i;

-    if (HAS_GMCH_DISPLAY(to_i915(dev)))
+    if (HAS_GMCH_DISPLAY(dev_priv))
         return false;

     /*
      * HDMI 12bpc affects the clocks, so it's only possible
      * when not cloning with other encoder types.
      */
-    return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
+    if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+        return false;
+
+    for_each_connector_in_state(state, connector, connector_state, i) {
+        const struct drm_display_info *info = &connector->display_info;
+
+        if (connector_state->crtc != crtc_state->base.crtc)
+            continue;
+
+        if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
+            return false;
+    }
+
+    return true;
 }

 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
             }
         }
     }
-    if (dev_priv->display.hpd_irq_setup)
+    if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
         dev_priv->display.hpd_irq_setup(dev_priv);
     spin_unlock_irq(&dev_priv->irq_lock);

@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
         }
     }

-    if (storm_detected)
+    if (storm_detected && dev_priv->display_irqs_enabled)
         dev_priv->display.hpd_irq_setup(dev_priv);
     spin_unlock(&dev_priv->irq_lock);

@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
      * Interrupt setup is already guaranteed to be single-threaded, this is
      * just to make the assert_spin_locked checks happy.
      */
-    spin_lock_irq(&dev_priv->irq_lock);
-    if (dev_priv->display.hpd_irq_setup)
-        dev_priv->display.hpd_irq_setup(dev_priv);
-    spin_unlock_irq(&dev_priv->irq_lock);
+    if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+        spin_lock_irq(&dev_priv->irq_lock);
+        if (dev_priv->display_irqs_enabled)
+            dev_priv->display.hpd_irq_setup(dev_priv);
+        spin_unlock_irq(&dev_priv->irq_lock);
+    }
 }

 static void i915_hpd_poll_init_work(struct work_struct *work)
|
||||||
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
|
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
|
atomic_notifier_call_chain(&rq->engine->context_status_notifier,
|
||||||
|
status, rq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
|
|
@@ -4928,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
     u32 mask = 0;

+    /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
     if (val > dev_priv->rps.min_freq_softlimit)
-        mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+        mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
     if (val < dev_priv->rps.max_freq_softlimit)
         mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

@@ -5039,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
     mutex_lock(&dev_priv->rps.hw_lock);
     if (dev_priv->rps.enabled) {
-        if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+        if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
             gen6_rps_reset_ei(dev_priv);
         I915_WRITE(GEN6_PMINTRMSK,
                gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -403,6 +403,9 @@ struct intel_engine_cs {
      */
     struct i915_gem_context *legacy_active_context;

+    /* status_notifier: list of callbacks for context-switch changes */
+    struct atomic_notifier_head context_status_notifier;
+
     struct intel_engine_hangcheck hangcheck;

     bool needs_cmd_parser;