Merge tag 'drm-intel-next-fixes-2017-07-11' of git://anongit.freedesktop.org/git/drm-intel into drm-next
drm/i915 fixes for v4.13-rc1

* tag 'drm-intel-next-fixes-2017-07-11' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Make DP-MST connector info work
  drm/i915/gvt: Use fence error from GVT request for workload status
  drm/i915/gvt: remove scheduler_mutex in per-engine workload_thread
  drm/i915/gvt: Revert "drm/i915/gvt: Fix possible recursive locking issue"
  drm/i915/gvt: Audit the command buffer address
  drm/i915/gvt: Fix a memory leak in intel_gvt_init_gtt()
  drm/i915/fbdev: Check for existence of ifbdev->vma before operations
  drm/i915: Hold RPM wakelock while initializing OA buffer
  drm/i915/cnl: Fix the CURSOR_COEFF_MASK used in DDI Vswing Programming
  drm/i915/cfl: Fix Workarounds.
  drm/i915: Avoid undefined behaviour of "u32 >> 32"
  drm/i915: reintroduce VLV/CHV PFI programming power domain workaround
  drm/i915: Fix an error checking test
  drm/i915: Disable MSI for all pre-gen5
  drm/i915/gvt: Make function dpy_reg_mmio_readx safe
  drm/i915/gvt: Don't read ADPA_CRT_HOTPLUG_MONITOR from host
  drm/i915/gvt: Set initial PORT_CLK_SEL vreg for BDW
  drm/i915/gvt: Fix inconsistent locks holding sequence
  drm/i915/gvt: Fix possible recursive locking issue
This commit is contained in:

commit caa164e373
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
         gma_head == gma_tail)
         return 0;
 
+    if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+        ret = -EINVAL;
+        goto out;
+    }
+
     ret = ip_gma_set(&s, gma_head);
     if (ret)
         goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
     s.rb_va = wa_ctx->indirect_ctx.shadow_va;
     s.workload = workload;
 
+    if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+        ret = -EINVAL;
+        goto out;
+    }
+
     ret = ip_gma_set(&s, gma_head);
     if (ret)
         goto out;
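The two hunks above (in the GVT-g command parser, likely drivers/gpu/drm/i915/gvt/cmd_parser.c) are the "Audit the command buffer address" fix: a guest-supplied ring buffer range is validated against the vGPU's GGTT allocation before the scanner walks it. A minimal userspace sketch of the overflow-safe range-check idea; the function name and parameters are illustrative, not the kernel API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: accept [start, start + size) iff it fits entirely
 * inside an aperture of `limit` bytes, without wrapping around. */
static bool range_is_valid(uint64_t start, uint64_t size, uint64_t limit)
{
    if (size == 0 || start >= limit)
        return false;
    /* written as a subtraction so start + size cannot overflow */
    return size <= limit - start;
}

int main(void)
{
    assert(range_is_valid(0x1000, 0x2000, 0x10000));
    assert(!range_is_valid(0xfffffffffffff000ull, 0x2000, 0x10000));
    return 0;
}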
@@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
             (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
             (PORT_B << TRANS_DDI_PORT_SHIFT) |
             TRANS_DDI_FUNC_ENABLE);
+        if (IS_BROADWELL(dev_priv)) {
+            vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+                ~PORT_CLK_SEL_MASK;
+            vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+                PORT_CLK_SEL_LCPLL_810;
+        }
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
         vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
@@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
             (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
             (PORT_C << TRANS_DDI_PORT_SHIFT) |
             TRANS_DDI_FUNC_ENABLE);
+        if (IS_BROADWELL(dev_priv)) {
+            vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+                ~PORT_CLK_SEL_MASK;
+            vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+                PORT_CLK_SEL_LCPLL_810;
+        }
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
         vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
@@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
             (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
             (PORT_D << TRANS_DDI_PORT_SHIFT) |
             TRANS_DDI_FUNC_ENABLE);
+        if (IS_BROADWELL(dev_priv)) {
+            vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+                ~PORT_CLK_SEL_MASK;
+            vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+                PORT_CLK_SEL_LCPLL_810;
+        }
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
         vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
@@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 
         vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
     }
+
+    /* Clear host CRT status, so guest couldn't detect this host CRT. */
+    if (IS_BROADWELL(dev_priv))
+        vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
         ret = setup_spt_oos(gvt);
         if (ret) {
             gvt_err("fail to initialize SPT oos\n");
+            dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+            __free_page(gvt->gtt.scratch_ggtt_page);
             return ret;
         }
     }
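The hunk above closes the leak reported in intel_gvt_init_gtt(): when setup_spt_oos() fails, the scratch page that was already allocated and DMA-mapped must be unwound before returning. The general rule is that each error exit undoes exactly what succeeded before it, in reverse order. A compilable sketch of that shape; every name below is a hypothetical stand-in, not a GVT function:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-ins for "allocate", "map", "initialize". */
static void *alloc_page_(void) { return malloc(4096); }
static int map_page_(void *p) { return p ? 0 : -ENOMEM; }
static void unmap_page_(void *p) { (void)p; }
static int init_oos_(void) { return 0; }

static int init_gtt_(void)
{
    void *page = alloc_page_();
    int ret;

    if (!page)
        return -ENOMEM;
    ret = map_page_(page);
    if (ret)
        goto err_free;
    ret = init_oos_();
    if (ret)
        goto err_unmap;    /* the step the fix above adds */
    return 0;

err_unmap:
    unmap_page_(page);
err_free:
    free(page);
    return ret;
}

int main(void) { return init_gtt_(); }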
@@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes)
 {
-    *(u32 *)p_data = (1 << 17);
-    return 0;
-}
-
-static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
-        void *p_data, unsigned int bytes)
-{
-    *(u32 *)p_data = 3;
-    return 0;
-}
-
-static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
-        void *p_data, unsigned int bytes)
-{
-    *(u32 *)p_data = (0x2f << 16);
+    switch (offset) {
+    case 0xe651c:
+    case 0xe661c:
+    case 0xe671c:
+    case 0xe681c:
+        vgpu_vreg(vgpu, offset) = 1 << 17;
+        break;
+    case 0xe6c04:
+        vgpu_vreg(vgpu, offset) = 0x3;
+        break;
+    case 0xe6e1c:
+        vgpu_vreg(vgpu, offset) = 0x2f << 16;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    read_vreg(vgpu, offset, p_data, bytes);
     return 0;
 }
 
@@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
     MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
         dp_aux_ch_ctl_mmio_write);
 
-    MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
+    MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
 
     MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
     MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
@@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
     MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
     MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
     MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
-    MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
-    MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
+    MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
+    MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
 
     MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
         PORTA_HOTPLUG_STATUS_MASK
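The rewrite above folds three near-identical read handlers into one: instead of writing a constant straight into p_data, the handler refreshes the vreg for a known offset and then returns it through read_vreg(), so the guest's actual read width is honored, and unknown offsets now fail with -EINVAL instead of silently producing a value. A rough userspace model of the read_vreg() step, assuming a byte-addressable shadow-register array and a little-endian host; this is a sketch of the idea, not the GVT implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t vreg[0x100000];  /* hypothetical shadow of the MMIO space */

/* Model: copy `bytes` bytes starting at `offset`, so 1-, 2- and 4-byte
 * guest reads all see consistent data from the same 32-bit register. */
static void read_vreg(unsigned int offset, void *p_data, unsigned int bytes)
{
    memcpy(p_data, &vreg[offset], bytes);
}

int main(void)
{
    uint32_t val32;
    uint16_t val16;

    memcpy(&vreg[0xe651c], &(uint32_t){1u << 17}, 4);
    read_vreg(0xe651c, &val32, sizeof(val32));
    read_vreg(0xe651e, &val16, sizeof(val16)); /* upper half: bit 17 -> bit 1 */
    printf("%#x %#x\n", val32, val16);
    return 0;
}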
@@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
     struct device *dev = mdev_dev(vgpu->vdev.mdev);
     unsigned long gfn;
 
-    mutex_lock(&vgpu->vdev.cache_lock);
-    while ((node = rb_first(&vgpu->vdev.cache))) {
+    for (;;) {
+        mutex_lock(&vgpu->vdev.cache_lock);
+        node = rb_first(&vgpu->vdev.cache);
+        if (!node) {
+            mutex_unlock(&vgpu->vdev.cache_lock);
+            break;
+        }
         dma = rb_entry(node, struct gvt_dma, node);
         gvt_dma_unmap_iova(vgpu, dma->iova);
         gfn = dma->gfn;
-
-        vfio_unpin_pages(dev, &gfn, 1);
         __gvt_cache_remove_entry(vgpu, dma);
+        mutex_unlock(&vgpu->vdev.cache_lock);
+        vfio_unpin_pages(dev, &gfn, 1);
     }
-    mutex_unlock(&vgpu->vdev.cache_lock);
 }
 
 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
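The reshaped loop above exists so that vfio_unpin_pages() is never called with cache_lock held: each iteration detaches one entry under the lock, releases the lock, and only then calls back into VFIO, which may re-enter GVT and take the same lock. A compact pthread sketch of the pattern, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* May re-enter this module and take `lock` again, so it must be
 * called with the lock dropped (the point of the fix above). */
static void external_release(struct node *n) { free(n); }

static void destroy_all(void)
{
    for (;;) {
        struct node *n;

        pthread_mutex_lock(&lock);
        n = head;
        if (!n) {
            pthread_mutex_unlock(&lock);
            break;
        }
        head = n->next;              /* detach under the lock */
        pthread_mutex_unlock(&lock);
        external_release(n);         /* call out lock-free */
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->next = head;
        head = n;
    }
    destroy_all();
    return 0;
}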
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
         atomic_set(&workload->shadow_ctx_active, 1);
         break;
     case INTEL_CONTEXT_SCHEDULE_OUT:
-        /* If the status is -EINPROGRESS means this workload
-         * doesn't meet any issue during dispatching so when
-         * get the SCHEDULE_OUT set the status to be zero for
-         * good. If the status is NOT -EINPROGRESS means there
-         * is something wrong happened during dispatching and
-         * the status should not be set to zero
-         */
-        if (workload->status == -EINPROGRESS)
-            workload->status = 0;
         atomic_set(&workload->shadow_ctx_active, 0);
         break;
     default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
         wait_event(workload->shadow_ctx_status_wq,
                !atomic_read(&workload->shadow_ctx_active));
 
+        /* If this request caused GPU hang, req->fence.error will
+         * be set to -EIO. Use -EIO to set workload status so
+         * that when this request caused GPU hang, didn't trigger
+         * context switch interrupt to guest.
+         */
+        if (likely(workload->status == -EINPROGRESS)) {
+            if (workload->req->fence.error == -EIO)
+                workload->status = -EIO;
+            else
+                workload->status = 0;
+        }
+
         i915_gem_request_put(fetch_and_zero(&workload->req));
 
         if (!workload->status && !vgpu->resetting) {
@@ -464,8 +467,6 @@ struct workload_thread_param {
     int ring_id;
 };
 
-static DEFINE_MUTEX(scheduler_mutex);
-
 static int workload_thread(void *priv)
 {
     struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
         if (!workload)
             break;
 
-        mutex_lock(&scheduler_mutex);
-
         gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
             workload->ring_id, workload,
             workload->vgpu->id);
@@ -537,9 +536,6 @@ complete:
                     FORCEWAKE_ALL);
 
         intel_runtime_pm_put(gvt->dev_priv);
-
-        mutex_unlock(&scheduler_mutex);
-
     }
     return 0;
 }
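Two things happen above: the per-ring workload_thread no longer serializes on a global scheduler_mutex, and the workload status is resolved at completion time from the request's fence error rather than being cleared unconditionally at SCHEDULE_OUT. The resolution rule is small enough to state as a sketch; the struct and names are illustrative, not the GVT types:

#include <assert.h>
#include <errno.h>

struct workload { int status; int fence_error; };

/* Only a workload still marked -EINPROGRESS (dispatch itself saw no
 * error) takes its final status from the fence: -EIO on a GPU hang,
 * 0 otherwise. A real dispatch error is left untouched. */
static void resolve_status(struct workload *w)
{
    if (w->status != -EINPROGRESS)
        return;
    w->status = (w->fence_error == -EIO) ? -EIO : 0;
}

int main(void)
{
    struct workload ok   = { -EINPROGRESS, 0 };
    struct workload hung = { -EINPROGRESS, -EIO };

    resolve_status(&ok);
    resolve_status(&hung);
    assert(ok.status == 0 && hung.status == -EIO);
    return 0;
}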
@@ -3083,7 +3083,7 @@ static void intel_connector_info(struct seq_file *m,
                connector->display_info.cea_rev);
     }
 
-    if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+    if (!intel_encoder)
         return;
 
     switch (connector->connector_type) {
@@ -1132,10 +1132,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
      * and the registers being closely associated.
      *
      * According to chipset errata, on the 965GM, MSI interrupts may
-     * be lost or delayed, but we use them anyways to avoid
-     * stuck interrupts on some machines.
+     * be lost or delayed, and was defeatured. MSI interrupts seem to
+     * get lost on g4x as well, and interrupt delivery seems to stay
+     * properly dead afterwards. So we'll just disable them for all
+     * pre-gen5 chipsets.
      */
-    if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
+    if (INTEL_GEN(dev_priv) >= 5) {
         if (pci_enable_msi(pdev) < 0)
             DRM_DEBUG_DRIVER("can't enable MSI");
     }
@@ -288,20 +288,26 @@ static int eb_create(struct i915_execbuffer *eb)
          * direct lookup.
          */
         do {
+            unsigned int flags;
+
+            /* While we can still reduce the allocation size, don't
+             * raise a warning and allow the allocation to fail.
+             * On the last pass though, we want to try as hard
+             * as possible to perform the allocation and warn
+             * if it fails.
+             */
+            flags = GFP_TEMPORARY;
+            if (size > 1)
+                flags |= __GFP_NORETRY | __GFP_NOWARN;
+
             eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
-                          GFP_TEMPORARY |
-                          __GFP_NORETRY |
-                          __GFP_NOWARN);
+                          flags);
             if (eb->buckets)
                 break;
         } while (--size);
 
-        if (unlikely(!eb->buckets)) {
-            eb->buckets = kzalloc(sizeof(struct hlist_head),
-                          GFP_TEMPORARY);
-            if (unlikely(!eb->buckets))
-                return -ENOMEM;
-        }
+        if (unlikely(!size))
+            return -ENOMEM;
 
         eb->lut_size = size;
     } else {
@@ -452,7 +458,7 @@ eb_add_vma(struct i915_execbuffer *eb,
             return err;
     }
 
-    if (eb->lut_size >= 0) {
+    if (eb->lut_size > 0) {
         vma->exec_handle = entry->handle;
         hlist_add_head(&vma->exec_node,
                    &eb->buckets[hash_32(entry->handle,
@@ -894,7 +900,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
 static void eb_reset_vmas(const struct i915_execbuffer *eb)
 {
     eb_release_vmas(eb);
-    if (eb->lut_size >= 0)
+    if (eb->lut_size > 0)
         memset(eb->buckets, 0,
                sizeof(struct hlist_head) << eb->lut_size);
 }
@@ -903,7 +909,7 @@ static void eb_destroy(const struct i915_execbuffer *eb)
 {
     GEM_BUG_ON(eb->reloc_cache.rq);
 
-    if (eb->lut_size >= 0)
+    if (eb->lut_size > 0)
         kfree(eb->buckets);
 }
 
@@ -2180,8 +2186,11 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         }
     }
 
-    if (eb_create(&eb))
-        return -ENOMEM;
+    err = eb_create(&eb);
+    if (err)
+        goto err_out_fence;
+
+    GEM_BUG_ON(!eb.lut_size);
 
     /*
      * Take a local wakeref for preparing to dispatch the execbuf as
@@ -2340,6 +2349,7 @@ err_unlock:
 err_rpm:
     intel_runtime_pm_put(eb.i915);
     eb_destroy(&eb);
+err_out_fence:
     if (out_fence_fd != -1)
         put_unused_fd(out_fence_fd);
 err_in_fence:
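The execbuffer changes above pair two ideas: the allocation loop only suppresses retries and warnings while it can still shrink the request (size > 1), and the hash table is never created with zero bits, because hash_32(handle, 0) would shift a 32-bit value right by 32, which is undefined behaviour in C. A sketch of the hashing detail; the GOLDEN_RATIO_32 value matches the kernel's <linux/hash.h>, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

/* Valid only for 1 <= bits <= 32; bits == 0 would make the shift
 * count 32, which is UB for a 32-bit operand, hence the floor of
 * size >= 1 and the `lut_size > 0` tests in the patch. */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
    return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
    printf("bucket = %u\n", hash_32(42, 6)); /* index in [0, 63] */
    return 0;
}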
@@ -2067,10 +2067,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
         return ret;
     }
 
-    ret = alloc_oa_buffer(dev_priv);
-    if (ret)
-        goto err_oa_buf_alloc;
-
     /* PRM - observability performance counters:
      *
      * OACONTROL, performance counter enable, note:
@@ -2086,6 +2082,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
     intel_runtime_pm_get(dev_priv);
     intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
+    ret = alloc_oa_buffer(dev_priv);
+    if (ret)
+        goto err_oa_buf_alloc;
+
     ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
     if (ret)
         goto err_enable;
@@ -2097,11 +2097,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
     return 0;
 
 err_enable:
-    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-    intel_runtime_pm_put(dev_priv);
     free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
+    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+    intel_runtime_pm_put(dev_priv);
     if (stream->ctx)
         oa_put_render_ctx_id(stream);
 
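The three perf hunks are one fix: alloc_oa_buffer() touches hardware, so it moves inside the RPM wakeref/forcewake bracket, and the error labels are reordered so teardown mirrors acquisition exactly in reverse. Generic shape of that unwind discipline, with stub names standing in for the i915 calls:

#include <errno.h>
#include <stdio.h>

static int get_power(void)  { puts("power on");  return 0; }
static void put_power(void) { puts("power off"); }
static int alloc_buf(void)  { puts("buffer");    return 0; }
static void free_buf(void)  { puts("no buffer"); }
static int enable_hw(void)  { return -EIO; /* force the unwind path */ }

static int stream_init(void)
{
    int ret = get_power();      /* 1: power first... */
    if (ret)
        return ret;
    ret = alloc_buf();          /* 2: ...so this may touch HW */
    if (ret)
        goto err_power;
    ret = enable_hw();          /* 3 */
    if (ret)
        goto err_buf;
    return 0;

err_buf:
    free_buf();                 /* undo 2 */
err_power:
    put_power();                /* undo 1, always last */
    return ret;
}

int main(void) { return stream_init() ? 1 : 0; }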
@@ -1802,7 +1802,7 @@ enum skl_disp_power_wells {
 #define   POST_CURSOR_2(x)      ((x) << 6)
 #define   POST_CURSOR_2_MASK    (0x3F << 6)
 #define   CURSOR_COEFF(x)       ((x) << 0)
-#define   CURSOR_COEFF_MASK     (0x3F << 6)
+#define   CURSOR_COEFF_MASK     (0x3F << 0)
 
 #define _CNL_PORT_TX_DW5_GRP_AE     0x162354
 #define _CNL_PORT_TX_DW5_GRP_B      0x1623D4
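A one-character bug: CURSOR_COEFF() places the value at bit 0, but its mask had been copied from POST_CURSOR_2_MASK and still shifted by 6, so clearing the field scrubbed the wrong bits. Deriving both from a single shift constant makes that mismatch impossible; a hedged sketch of the style, not how i915_reg.h is actually written:

#include <assert.h>
#include <stdint.h>

#define CURSOR_COEFF_SHIFT  0
#define CURSOR_COEFF_MASK   (0x3Fu << CURSOR_COEFF_SHIFT)
#define CURSOR_COEFF(x)     ((uint32_t)(x) << CURSOR_COEFF_SHIFT)

int main(void)
{
    uint32_t reg = 0xFFFFFFFFu;

    /* read-modify-write: clear the field, then set the new value */
    reg = (reg & ~CURSOR_COEFF_MASK) | CURSOR_COEFF(0x2A);
    assert((reg & CURSOR_COEFF_MASK) == 0x2A);
    return 0;
}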
@@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
     int cdclk = cdclk_state->cdclk;
     u32 val, cmd;
 
+    /* There are cases where we can end up here with power domains
+     * off and a CDCLK frequency other than the minimum, like when
+     * issuing a modeset without actually changing any display after
+     * a system suspend. So grab the PIPE-A domain, which covers
+     * the HW blocks needed for the following programming.
+     */
+    intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
     if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
         cmd = 2;
     else if (cdclk == 266667)
@@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
     intel_update_cdclk(dev_priv);
 
     vlv_program_pfi_credits(dev_priv);
+
+    intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
         return;
     }
 
+    /* There are cases where we can end up here with power domains
+     * off and a CDCLK frequency other than the minimum, like when
+     * issuing a modeset without actually changing any display after
+     * a system suspend. So grab the PIPE-A domain, which covers
+     * the HW blocks needed for the following programming.
+     */
+    intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+
     /*
      * Specs are full of misinformation, but testing on actual
      * hardware has shown that we just need to write the desired
@@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
     intel_update_cdclk(dev_priv);
 
     vlv_program_pfi_credits(dev_priv);
+
+    intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static int bdw_calc_cdclk(int max_pixclk)
@@ -821,9 +821,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
     I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
            GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-    /* WaDisableKillLogic:bxt,skl,kbl,cfl */
-    I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-           ECOCHK_DIS_TLB);
+    /* WaDisableKillLogic:bxt,skl,kbl */
+    if (!IS_COFFEELAKE(dev_priv))
+        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+               ECOCHK_DIS_TLB);
 
     /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
     /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
@@ -894,10 +895,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
     WA_SET_BIT_MASKED(HDC_CHICKEN0,
               HDC_FORCE_NON_COHERENT);
 
-    /* WaDisableHDCInvalidation:skl,bxt,kbl */
-    if (!IS_COFFEELAKE(dev_priv))
-        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-               BDW_DISABLE_HDC_INVALIDATION);
+    /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+    I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+           BDW_DISABLE_HDC_INVALIDATION);
 
     /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
     if (IS_SKYLAKE(dev_priv) ||
@@ -535,14 +535,15 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
     drm_fb_helper_fini(&ifbdev->helper);
 
-    if (ifbdev->fb) {
+    if (ifbdev->vma) {
         mutex_lock(&ifbdev->helper.dev->struct_mutex);
         intel_unpin_fb_vma(ifbdev->vma);
         mutex_unlock(&ifbdev->helper.dev->struct_mutex);
-
-        drm_framebuffer_remove(&ifbdev->fb->base);
     }
 
+    if (ifbdev->fb)
+        drm_framebuffer_remove(&ifbdev->fb->base);
+
     kfree(ifbdev);
 }
 
@@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
     struct intel_fbdev *ifbdev = dev_priv->fbdev;
     struct fb_info *info;
 
-    if (!ifbdev || !ifbdev->fb)
+    if (!ifbdev || !ifbdev->vma)
         return;
 
     info = ifbdev->helper.fbdev;
@@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
     struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-    if (ifbdev && ifbdev->fb)
+    if (ifbdev && ifbdev->vma)
         drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
@@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
         return;
 
     intel_fbdev_sync(ifbdev);
-    if (!ifbdev->fb)
+    if (!ifbdev->vma)
         return;
 
     if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
@@ -246,9 +246,9 @@ static int igt_dmabuf_export_vmap(void *arg)
     i915_gem_object_put(obj);
 
     ptr = dma_buf_vmap(dmabuf);
-    if (IS_ERR(ptr)) {
-        err = PTR_ERR(ptr);
-        pr_err("dma_buf_vmap failed with err=%d\n", err);
+    if (!ptr) {
+        pr_err("dma_buf_vmap failed\n");
+        err = -ENOMEM;
         goto out;
     }
 
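The selftest fix above is about kernel error-return conventions: dma_buf_vmap() reports failure with a plain NULL, not an ERR_PTR()-encoded errno, so the IS_ERR()/PTR_ERR() pair could never observe the failure. A userspace model of why IS_ERR(NULL) is false; the macros mirror include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static int IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
    void *err_style  = (void *)-12L;  /* ERR_PTR(-ENOMEM) style */
    void *null_style = NULL;          /* how dma_buf_vmap() fails */

    printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(err_style));  /* 1 */
    printf("IS_ERR(NULL)         = %d\n", IS_ERR(null_style)); /* 0 */
    return 0;
}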