Merge tag 'drm-intel-next-2014-10-03-no-ppgtt' of git://anongit.freedesktop.org/drm-intel into drm-next
Ok, new attempt, this time around with full ppgtt disabled again.

drm-intel-next-2014-10-03:
- first batch of skl stage 1 enabling
- fixes from Rodrigo to the PSR, fbc and sink crc code
- kerneldoc for the frontbuffer tracking code, runtime pm code and the basic interrupt enable/disable functions
- smaller stuff all over

drm-intel-next-2014-09-19:
- bunch more i830M fixes from Ville
- full ppgtt now again enabled by default
- more ppgtt fixes from Michel Thierry and Chris Wilson
- plane config work from Gustavo Padovan
- spinlock clarifications
- piles of smaller improvements all over, as usual

* tag 'drm-intel-next-2014-10-03-no-ppgtt' of git://anongit.freedesktop.org/drm-intel: (114 commits)
  Revert "drm/i915: Enable full PPGTT on gen7"
  drm/i915: Update DRIVER_DATE to 20141003
  drm/i915: Remove the duplicated logic between the two shrink phases
  drm/i915: kerneldoc for interrupt enable/disable functions
  drm/i915: Use dev_priv instead of dev in irq setup functions
  drm/i915: s/pm._irqs_disabled/pm.irqs_enabled/
  drm/i915: Clear TX FIFO reset master override bits on chv
  drm/i915: Make sure hardware uses the correct swing margin/deemph bits on chv
  drm/i915: make sink_crc return -EIO on aux read/write failure
  drm/i915: Constify send buffer for intel_dp_aux_ch
  drm/i915: De-magic the PSR AUX message
  drm/i915: Reinstate error level message for non-simulated gpu hangs
  drm/i915: Kerneldoc for intel_runtime_pm.c
  drm/i915: Call runtime_pm_disable directly
  drm/i915: Move intel_display_set_init_power to intel_runtime_pm.c
  drm/i915: Bikeshed rpm functions name a bit.
  drm/i915: Extract intel_runtime_pm.c
  drm/i915: Remove intel_modeset_suspend_hw
  drm/i915: spelling fixes for frontbuffer tracking kerneldoc
  drm/i915: Tighting frontbuffer tracking around flips
  ...
commit bbf0ef0334
@@ -3787,6 +3787,26 @@ int num_ioctls;</synopsis>
blocks. This excludes a set of SoC platforms with an SGX rendering unit,
those have basic support through the gma500 drm driver.
</para>
<sect1>
<title>Core Driver Infrastructure</title>
<para>
This section covers core driver infrastructure used by both the display
and the GEM parts of the driver.
</para>
<sect2>
<title>Runtime Power Management</title>
!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
!Idrivers/gpu/drm/i915/intel_runtime_pm.c
</sect2>
<sect2>
<title>Interrupt Handling</title>
!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
</sect2>
</sect1>
<sect1>
<title>Display Hardware Handling</title>
<para>
@@ -3803,6 +3823,13 @@ int num_ioctls;</synopsis>
configuration change.
</para>
</sect2>
<sect2>
<title>Frontbuffer Tracking</title>
!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
!Idrivers/gpu/drm/i915/intel_frontbuffer.c
!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
</sect2>
<sect2>
<title>Plane Configuration</title>
<para>
@@ -3932,5 +3959,6 @@ int num_ioctls;</synopsis>
</sect2>
</sect1>
</chapter>
!Cdrivers/gpu/drm/i915/i915_irq.c
</part>
</book>

@@ -455,6 +455,23 @@ struct intel_stolen_funcs {
u32 (*base)(int num, int slot, int func, size_t size);
};

static size_t __init gen9_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;

gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
gmch_ctrl &= BDW_GMCH_GMS_MASK;

if (gmch_ctrl < 0xf0)
return gmch_ctrl << 25; /* 32 MB units */
else
/* 4MB increments starting at 0xf0 for 4MB */
return (gmch_ctrl - 0xf0 + 1) << 22;
}

typedef size_t (*stolen_size_fn)(int num, int slot, int func);

static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
.base = i830_stolen_base,
.size = i830_stolen_size,
@@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
.size = gen8_stolen_size,
};

static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = gen9_stolen_size,
};

static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
.base = intel_stolen_base,
.size = chv_stolen_size,
@@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_BDW_M_IDS(&gen8_stolen_funcs),
INTEL_BDW_D_IDS(&gen8_stolen_funcs),
INTEL_CHV_IDS(&chv_stolen_funcs),
INTEL_SKL_IDS(&gen9_stolen_funcs),
};

static void __init intel_graphics_stolen(int num, int slot, int func)

@@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void)
__free_pages(page, 2);
return NULL;
}
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page;
}
@@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page)
return;

set_pages_wb(page, 4);
put_page(page);
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
@@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
get_page(page);
set_pages_uc(page, 1);

if (intel_private.needs_dmar) {
@@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void)
set_pages_wb(intel_private.scratch_page, 1);
pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
}

@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
intel_pm.o
intel_pm.o \
intel_runtime_pm.o

i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o

@@ -43,6 +45,7 @@ i915-y += intel_renderstate_gen6.o \
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
intel_frontbuffer.o \
intel_modes.o \
intel_overlay.o \
intel_sideband.o \

@@ -847,12 +847,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
if (!ring->needs_cmd_parser)
return false;

/*
* XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
* disabled. That will cause all of the parser's PPGTT checks to
* fail. For now, disable parsing when PPGTT is off.
*/
if (USES_PPGTT(ring->dev))
if (!USES_PPGTT(ring->dev))
return false;

return (i915.enable_cmd_parser == 1);
@@ -888,8 +883,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
}

if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);

@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
struct intel_crtc *crtc;
int ret;

@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;

spin_lock_irqsave(&dev->event_lock, flags);
spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
spin_unlock_irq(&dev->event_lock);
}

mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}

for_each_pipe(dev_priv, pipe) {
if (!intel_display_power_enabled(dev_priv,
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
@@ -1986,7 +1985,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
if (IS_GEN8(dev))
if (INTEL_INFO(dev)->gen >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
I915_READ(GAMTARBMODE));
else

@@ -1338,14 +1338,7 @@ static int i915_load_modeset_init(struct drm_device *dev)

intel_power_domains_init_hw(dev_priv);

/*
* We enable some interrupt sources in our postinstall hooks, so mark
* interrupts as enabled _before_ actually enabling them to avoid
* special cases in our ordering checks.
*/
dev_priv->pm._irqs_disabled = false;

ret = drm_irq_install(dev, dev->pdev->irq);
ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;

@@ -1370,7 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
goto cleanup_gem;

/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev);
intel_hpd_init(dev_priv);

/*
* Some ports require correctly set-up hpd registers for detection to
@@ -1534,7 +1527,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)

info = (struct intel_device_info *)&dev_priv->info;

if (IS_VALLEYVIEW(dev))
if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -1614,7 +1607,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1740,7 +1733,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq;
}

intel_irq_init(dev);
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);

/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1798,12 +1791,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);

intel_init_runtime_pm(dev_priv);
intel_runtime_pm_enable(dev_priv);

return 0;

out_power_well:
intel_power_domains_remove(dev_priv);
intel_power_domains_fini(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1846,16 +1839,10 @@ int i915_driver_unload(struct drm_device *dev)
return ret;
}

intel_fini_runtime_pm(dev_priv);
intel_power_domains_fini(dev_priv);

intel_gpu_ips_teardown();

/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_remove(dev_priv);

i915_teardown_sysfs(dev);

WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));

@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
.is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};

/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info)
INTEL_CHV_IDS(&intel_cherryview_info), \
INTEL_SKL_IDS(&intel_skylake_info)

static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -461,6 +475,16 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
WARN_ON(!IS_ULT(dev));
} else
continue;

@@ -575,14 +599,14 @@ static int i915_drm_freeze(struct drm_device *dev)

flush_delayed_work(&dev_priv->rps.delayed_resume_work);

intel_runtime_pm_disable_interrupts(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);

intel_suspend_encoders(dev_priv);

intel_suspend_gt_powersave(dev);

intel_modeset_suspend_hw(dev);
intel_suspend_hw(dev);
}

i915_gem_suspend_gtt_mappings(dev);
@@ -680,16 +704,16 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);

intel_runtime_pm_restore_interrupts(dev);
/* We need working interrupts for modeset enabling ... */
intel_runtime_pm_enable_interrupts(dev_priv);

intel_modeset_init_hw(dev);

{
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
}

intel_dp_mst_resume(dev);
@@ -703,7 +727,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
* bother with the tiny race here where we might loose hotplug
* notifications.
* */
intel_hpd_init(dev);
intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -820,6 +844,9 @@ int i915_reset(struct drm_device *dev)
}
}

if (i915_stop_ring_allow_warn(dev_priv))
pr_notice("drm/i915: Resetting chip after gpu hang\n");

if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -1446,12 +1473,12 @@ static int intel_runtime_suspend(struct device *device)
* intel_mark_idle().
*/
cancel_work_sync(&dev_priv->rps.work);
intel_runtime_pm_disable_interrupts(dev);
intel_runtime_pm_disable_interrupts(dev_priv);

ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_restore_interrupts(dev);
intel_runtime_pm_enable_interrupts(dev_priv);

return ret;
}
@@ -1511,7 +1538,7 @@ static int intel_runtime_resume(struct device *device)
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);

intel_runtime_pm_restore_interrupts(dev);
intel_runtime_pm_enable_interrupts(dev_priv);
intel_reset_gt_powersave(dev);

if (ret)

@ -55,7 +55,7 @@
|
|||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20140905"
|
||||
#define DRIVER_DATE "20141003"
|
||||
|
||||
enum pipe {
|
||||
INVALID_PIPE = -1,
|
||||
|
@ -76,6 +76,14 @@ enum transcoder {
|
|||
};
|
||||
#define transcoder_name(t) ((t) + 'A')
|
||||
|
||||
/*
|
||||
* This is the maximum (across all platforms) number of planes (primary +
|
||||
* sprites) that can be active at the same time on one pipe.
|
||||
*
|
||||
* This value doesn't count the cursor plane.
|
||||
*/
|
||||
#define I915_MAX_PLANES 3
|
||||
|
||||
enum plane {
|
||||
PLANE_A = 0,
|
||||
PLANE_B,
|
||||
|
@ -551,6 +559,7 @@ struct intel_uncore {
|
|||
func(is_ivybridge) sep \
|
||||
func(is_valleyview) sep \
|
||||
func(is_haswell) sep \
|
||||
func(is_skylake) sep \
|
||||
func(is_preliminary) sep \
|
||||
func(has_fbc) sep \
|
||||
func(has_pipe_cxsr) sep \
|
||||
|
@ -663,6 +672,18 @@ struct i915_fbc {
|
|||
|
||||
bool false_color;
|
||||
|
||||
/* Tracks whether the HW is actually enabled, not whether the feature is
|
||||
* possible. */
|
||||
bool enabled;
|
||||
|
||||
/* On gen8 some rings cannont perform fbc clean operation so for now
|
||||
* we are doing this on SW with mmio.
|
||||
* This variable works in the opposite information direction
|
||||
* of ring->fbc_dirty telling software on frontbuffer tracking
|
||||
* to perform the cache clean on sw side.
|
||||
*/
|
||||
bool need_sw_cache_clean;
|
||||
|
||||
struct intel_fbc_work {
|
||||
struct delayed_work work;
|
||||
struct drm_crtc *crtc;
|
||||
|
@ -704,6 +725,7 @@ enum intel_pch {
|
|||
PCH_IBX, /* Ibexpeak PCH */
|
||||
PCH_CPT, /* Cougarpoint PCH */
|
||||
PCH_LPT, /* Lynxpoint PCH */
|
||||
PCH_SPT, /* Sunrisepoint PCH */
|
||||
PCH_NOP,
|
||||
};
|
||||
|
||||
|
@ -1369,7 +1391,7 @@ struct ilk_wm_values {
|
|||
*
|
||||
* Our driver uses the autosuspend delay feature, which means we'll only really
|
||||
* suspend if we stay with zero refcount for a certain amount of time. The
|
||||
* default value is currently very conservative (see intel_init_runtime_pm), but
|
||||
* default value is currently very conservative (see intel_runtime_pm_enable), but
|
||||
* it can be changed with the standard runtime PM files from sysfs.
|
||||
*
|
||||
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
|
||||
|
@ -1382,7 +1404,7 @@ struct ilk_wm_values {
|
|||
*/
|
||||
struct i915_runtime_pm {
|
||||
bool suspended;
|
||||
bool _irqs_disabled;
|
||||
bool irqs_enabled;
|
||||
};
|
||||
|
||||
enum intel_pipe_crc_source {
|
||||
|
@ -1509,7 +1531,7 @@ struct drm_i915_private {
|
|||
struct intel_overlay *overlay;
|
||||
|
||||
/* backlight registers and fields in struct intel_panel */
|
||||
spinlock_t backlight_lock;
|
||||
struct mutex backlight_lock;
|
||||
|
||||
/* LVDS info */
|
||||
bool no_aux_handshake;
|
||||
|
@ -2073,6 +2095,7 @@ struct drm_i915_cmd_table {
|
|||
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
|
||||
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
|
||||
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
|
||||
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
|
||||
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
|
||||
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
|
||||
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
|
||||
|
@ -2080,6 +2103,8 @@ struct drm_i915_cmd_table {
|
|||
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
|
||||
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
|
||||
(INTEL_DEVID(dev) & 0xf) == 0xe))
|
||||
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
|
||||
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
|
||||
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
|
||||
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
|
||||
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
|
||||
|
@ -2103,6 +2128,7 @@ struct drm_i915_cmd_table {
|
|||
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
|
||||
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
|
||||
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
|
||||
#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
|
||||
|
||||
#define RENDER_RING (1<<RCS)
|
||||
#define BSD_RING (1<<VCS)
|
||||
|
@ -2120,8 +2146,6 @@ struct drm_i915_cmd_table {
|
|||
|
||||
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
|
||||
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
|
||||
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
|
||||
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
|
||||
#define USES_PPGTT(dev) (i915.enable_ppgtt)
|
||||
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
|
||||
|
||||
|
@ -2168,8 +2192,11 @@ struct drm_i915_cmd_table {
|
|||
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
|
||||
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
|
||||
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
|
||||
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
||||
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
||||
|
||||
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
|
||||
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
|
||||
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
|
||||
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
|
||||
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
|
||||
|
@ -2262,8 +2289,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
|
|||
|
||||
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
|
||||
int new_delay);
|
||||
extern void intel_irq_init(struct drm_device *dev);
|
||||
extern void intel_hpd_init(struct drm_device *dev);
|
||||
extern void intel_irq_init(struct drm_i915_private *dev_priv);
|
||||
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
|
||||
int intel_irq_install(struct drm_i915_private *dev_priv);
|
||||
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void intel_uncore_sanitize(struct drm_device *dev);
|
||||
extern void intel_uncore_early_sanitize(struct drm_device *dev,
|
||||
|
@ -2793,7 +2822,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
|
|||
|
||||
/* modesetting */
|
||||
extern void intel_modeset_init_hw(struct drm_device *dev);
|
||||
extern void intel_modeset_suspend_hw(struct drm_device *dev);
|
||||
extern void intel_modeset_init(struct drm_device *dev);
|
||||
extern void intel_modeset_gem_init(struct drm_device *dev);
|
||||
extern void intel_modeset_cleanup(struct drm_device *dev);
|
||||
|
@ -2804,7 +2832,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
|
|||
extern void i915_redisable_vga(struct drm_device *dev);
|
||||
extern void i915_redisable_vga_power_on(struct drm_device *dev);
|
||||
extern bool intel_fbc_enabled(struct drm_device *dev);
|
||||
extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
|
||||
extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
|
||||
extern void intel_disable_fbc(struct drm_device *dev);
|
||||
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
|
||||
extern void intel_init_pch_refclk(struct drm_device *dev);
|
||||
|
|
|
@ -1945,7 +1945,14 @@ unsigned long
|
|||
i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
long target, unsigned flags)
|
||||
{
|
||||
const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
|
||||
const struct {
|
||||
struct list_head *list;
|
||||
unsigned int bit;
|
||||
} phases[] = {
|
||||
{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
|
||||
{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
|
||||
{ NULL, 0 },
|
||||
}, *phase;
|
||||
unsigned long count = 0;
|
||||
|
||||
/*
|
||||
|
@ -1967,48 +1974,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
|||
* dev->struct_mutex and so we won't ever be able to observe an
|
||||
* object on the bound_list with a reference count equals 0.
|
||||
*/
|
||||
if (flags & I915_SHRINK_UNBOUND) {
|
||||
for (phase = phases; phase->list; phase++) {
|
||||
struct list_head still_in_list;
|
||||
|
||||
INIT_LIST_HEAD(&still_in_list);
|
||||
while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
obj = list_first_entry(&dev_priv->mm.unbound_list,
|
||||
typeof(*obj), global_list);
|
||||
list_move_tail(&obj->global_list, &still_in_list);
|
||||
|
||||
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
|
||||
continue;
|
||||
|
||||
drm_gem_object_reference(&obj->base);
|
||||
|
||||
if (i915_gem_object_put_pages(obj) == 0)
|
||||
count += obj->base.size >> PAGE_SHIFT;
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
}
|
||||
list_splice(&still_in_list, &dev_priv->mm.unbound_list);
|
||||
}
|
||||
|
||||
if (flags & I915_SHRINK_BOUND) {
|
||||
struct list_head still_in_list;
|
||||
if ((flags & phase->bit) == 0)
|
||||
continue;
|
||||
|
||||
INIT_LIST_HEAD(&still_in_list);
|
||||
while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
|
||||
while (count < target && !list_empty(phase->list)) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct i915_vma *vma, *v;
|
||||
|
||||
obj = list_first_entry(&dev_priv->mm.bound_list,
|
||||
obj = list_first_entry(phase->list,
|
||||
typeof(*obj), global_list);
|
||||
list_move_tail(&obj->global_list, &still_in_list);
|
||||
|
||||
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
|
||||
if (flags & I915_SHRINK_PURGEABLE &&
|
||||
!i915_gem_object_is_purgeable(obj))
|
||||
continue;
|
||||
|
||||
drm_gem_object_reference(&obj->base);
|
||||
|
||||
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
|
||||
/* For the unbound phase, this should be a no-op! */
|
||||
list_for_each_entry_safe(vma, v,
|
||||
&obj->vma_list, vma_link)
|
||||
if (i915_vma_unbind(vma))
|
||||
break;
|
||||
|
||||
|
@ -2017,7 +2006,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
|||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
}
|
||||
list_splice(&still_in_list, &dev_priv->mm.bound_list);
|
||||
list_splice(&still_in_list, phase->list);
|
||||
}
|
||||
|
||||
return count;
|
||||
|
@ -3166,6 +3155,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
|
|||
obj->stride, obj->tiling_mode);
|
||||
|
||||
switch (INTEL_INFO(dev)->gen) {
|
||||
case 9:
|
||||
case 8:
|
||||
case 7:
|
||||
case 6:
|
||||
|
@ -3384,46 +3374,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
|
|||
return true;
|
||||
}
|
||||
|
||||
static void i915_gem_verify_gtt(struct drm_device *dev)
|
||||
{
|
||||
#if WATCH_GTT
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
int err = 0;
|
||||
|
||||
list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
|
||||
if (obj->gtt_space == NULL) {
|
||||
printk(KERN_ERR "object found on GTT list with no space reserved\n");
|
||||
err++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (obj->cache_level != obj->gtt_space->color) {
|
||||
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
|
||||
i915_gem_obj_ggtt_offset(obj),
|
||||
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
|
||||
obj->cache_level,
|
||||
obj->gtt_space->color);
|
||||
err++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!i915_gem_valid_gtt_space(dev,
|
||||
obj->gtt_space,
|
||||
obj->cache_level)) {
|
||||
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
|
||||
i915_gem_obj_ggtt_offset(obj),
|
||||
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
|
||||
obj->cache_level);
|
||||
err++;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
WARN_ON(err);
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds free space in the GTT aperture and binds the object there.
|
||||
*/
|
||||
|
@ -3532,7 +3482,6 @@ search_free:
|
|||
vma->bind_vma(vma, obj->cache_level,
|
||||
flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
|
||||
|
||||
i915_gem_verify_gtt(dev);
|
||||
return vma;
|
||||
|
||||
err_remove_node:
|
||||
|
@ -3769,7 +3718,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
|
|||
old_write_domain);
|
||||
}
|
||||
|
||||
i915_gem_verify_gtt(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -5119,6 +5067,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_track_fb - update frontbuffer tracking
|
||||
* old: current GEM buffer for the frontbuffer slots
|
||||
* new: new GEM buffer for the frontbuffer slots
|
||||
* frontbuffer_bits: bitmask of frontbuffer slots
|
||||
*
|
||||
* This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
|
||||
* from @old and setting them in @new. Both @old and @new can be NULL.
|
||||
*/
|
||||
void i915_gem_track_fb(struct drm_i915_gem_object *old,
|
||||
struct drm_i915_gem_object *new,
|
||||
unsigned frontbuffer_bits)
|
||||
|
|
|
@ -35,13 +35,21 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
|
|||
|
||||
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
|
||||
{
|
||||
if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
|
||||
bool has_aliasing_ppgtt;
|
||||
bool has_full_ppgtt;
|
||||
|
||||
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
|
||||
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
|
||||
if (IS_GEN8(dev))
|
||||
has_full_ppgtt = false; /* XXX why? */
|
||||
|
||||
if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
|
||||
return 0;
|
||||
|
||||
if (enable_ppgtt == 1)
|
||||
return 1;
|
||||
|
||||
if (enable_ppgtt == 2 && HAS_PPGTT(dev))
|
||||
if (enable_ppgtt == 2 && has_full_ppgtt)
|
||||
return 2;
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
|
@ -59,7 +67,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
|
|||
return 0;
|
||||
}
|
||||
|
||||
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
|
||||
return has_aliasing_ppgtt ? 1 : 0;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1092,7 +1100,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
|
|||
|
||||
if (INTEL_INFO(dev)->gen < 8)
|
||||
return gen6_ppgtt_init(ppgtt);
|
||||
else if (IS_GEN8(dev))
|
||||
else if (IS_GEN8(dev) || IS_GEN9(dev))
|
||||
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
|
||||
else
|
||||
BUG();
|
||||
|
@ -1764,7 +1772,6 @@ static int setup_scratch_page(struct drm_device *dev)
|
|||
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
|
||||
if (page == NULL)
|
||||
return -ENOMEM;
|
||||
get_page(page);
|
||||
set_pages_uc(page, 1);
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
|
@ -1789,7 +1796,6 @@ static void teardown_scratch_page(struct drm_device *dev)
|
|||
set_pages_wb(page, 1);
|
||||
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
put_page(page);
|
||||
__free_page(page);
|
||||
}
|
||||
|
||||
|
@ -1859,6 +1865,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
|
|||
return (gmch_ctrl - 0x17 + 9) << 22;
|
||||
}
|
||||
|
||||
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
|
||||
{
|
||||
gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
|
||||
gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
|
||||
|
||||
if (gen9_gmch_ctl < 0xf0)
|
||||
return gen9_gmch_ctl << 25; /* 32 MB units */
|
||||
else
|
||||
/* 4MB increments starting at 0xf0 for 4MB */
|
||||
return (gen9_gmch_ctl - 0xf0 + 1) << 22;
|
||||
}
|
||||
|
||||
static int ggtt_probe_common(struct drm_device *dev,
|
||||
size_t gtt_size)
|
||||
{
|
||||
|
@ -1955,7 +1973,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
|
|||
|
||||
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
|
||||
|
||||
if (IS_CHERRYVIEW(dev)) {
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
*stolen = gen9_get_stolen_size(snb_gmch_ctl);
|
||||
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
|
||||
} else if (IS_CHERRYVIEW(dev)) {
|
||||
*stolen = chv_get_stolen_size(snb_gmch_ctl);
|
||||
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
|
||||
} else {
|
||||
|
@ -2127,6 +2148,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
|
|||
vma->obj = obj;
|
||||
|
||||
switch (INTEL_INFO(vm->dev)->gen) {
|
||||
case 9:
|
||||
case 8:
|
||||
case 7:
|
||||
case 6:
|
||||
|
|
|
@ -765,6 +765,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
|
|||
|
||||
/* Fences */
|
||||
switch (INTEL_INFO(dev)->gen) {
|
||||
case 9:
|
||||
case 8:
|
||||
case 7:
|
||||
case 6:
|
||||
|
@ -923,6 +924,7 @@ static void i915_record_ring_state(struct drm_device *dev,
|
|||
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
|
||||
|
||||
switch (INTEL_INFO(dev)->gen) {
|
||||
case 9:
|
||||
case 8:
|
||||
for (i = 0; i < 4; i++) {
|
||||
ering->vm_info.pdp[i] =
|
||||
|
@ -1326,13 +1328,12 @@ void i915_error_state_get(struct drm_device *dev,
|
|||
struct i915_error_state_file_priv *error_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
|
||||
spin_lock_irq(&dev_priv->gpu_error.lock);
|
||||
error_priv->error = dev_priv->gpu_error.first_error;
|
||||
if (error_priv->error)
|
||||
kref_get(&error_priv->error->ref);
|
||||
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
|
||||
spin_unlock_irq(&dev_priv->gpu_error.lock);
|
||||
|
||||
}
|
||||
|
||||
|
@ -1346,12 +1347,11 @@ void i915_destroy_error_state(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_error_state *error;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
|
||||
spin_lock_irq(&dev_priv->gpu_error.lock);
|
||||
error = dev_priv->gpu_error.first_error;
|
||||
dev_priv->gpu_error.first_error = NULL;
|
||||
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
|
||||
spin_unlock_irq(&dev_priv->gpu_error.lock);
|
||||
|
||||
if (error)
|
||||
kref_put(&error->ref, i915_error_state_free);
|
||||
|
@ -1389,6 +1389,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
|
|||
WARN_ONCE(1, "Unsupported platform\n");
|
||||
case 7:
|
||||
case 8:
|
||||
case 9:
|
||||
instdone[0] = I915_READ(GEN7_INSTDONE_1);
|
||||
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
|
||||
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
|
||||
|
|
|
@ -37,6 +37,14 @@
|
|||
#include "i915_trace.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
/**
|
||||
* DOC: interrupt handling
|
||||
*
|
||||
* These functions provide the basic support for enabling and disabling the
|
||||
* interrupt handling support. There's a lot more functionality in i915_irq.c
|
||||
* and related files, but that will be described in separate chapters.
|
||||
*/
|
||||
|
||||
static const u32 hpd_ibx[] = {
|
||||
[HPD_CRT] = SDE_CRT_HOTPLUG,
|
||||
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
|
||||
|
@ -310,9 +318,8 @@ void i9xx_check_fifo_underruns(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
u32 reg = PIPESTAT(crtc->pipe);
|
||||
|
@ -331,7 +338,7 @@ void i9xx_check_fifo_underruns(struct drm_device *dev)
|
|||
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
|
@ -503,7 +510,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
|
|||
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
|
||||
else if (IS_GEN7(dev))
|
||||
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
|
||||
else if (IS_GEN8(dev))
|
||||
else if (IS_GEN8(dev) || IS_GEN9(dev))
|
||||
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
|
||||
|
||||
return old;
|
||||
|
@ -589,6 +596,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
|||
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
WARN_ON(!intel_irqs_enabled(dev_priv));
|
||||
|
||||
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
|
||||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
|
||||
|
@ -615,6 +623,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
|||
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
WARN_ON(!intel_irqs_enabled(dev_priv));
|
||||
|
||||
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
|
||||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
|
||||
|
@ -694,19 +703,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
|||
static void i915_enable_asle_pipestat(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
i915_enable_pipestat(dev_priv, PIPE_A,
|
||||
PIPE_LEGACY_BLC_EVENT_STATUS);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1094,18 +1102,17 @@ static void i915_digport_work_func(struct work_struct *work)
|
|||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private, dig_port_work);
|
||||
unsigned long irqflags;
|
||||
u32 long_port_mask, short_port_mask;
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
int i, ret;
|
||||
u32 old_bits = 0;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
long_port_mask = dev_priv->long_hpd_port_mask;
|
||||
dev_priv->long_hpd_port_mask = 0;
|
||||
short_port_mask = dev_priv->short_hpd_port_mask;
|
||||
dev_priv->short_hpd_port_mask = 0;
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for (i = 0; i < I915_MAX_PORTS; i++) {
|
||||
bool valid = false;
|
||||
|
@ -1130,9 +1137,9 @@ static void i915_digport_work_func(struct work_struct *work)
|
|||
}
|
||||
|
||||
if (old_bits) {
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
dev_priv->hpd_event_bits |= old_bits;
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
schedule_work(&dev_priv->hotplug_work);
|
||||
}
|
||||
}
|
||||
|
@ -1151,7 +1158,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
|
|||
struct intel_connector *intel_connector;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_connector *connector;
|
||||
unsigned long irqflags;
|
||||
bool hpd_disabled = false;
|
||||
bool changed = false;
|
||||
u32 hpd_event_bits;
|
||||
|
@ -1159,7 +1165,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
|
|||
mutex_lock(&mode_config->mutex);
|
||||
DRM_DEBUG_KMS("running encoder hotplug functions\n");
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
hpd_event_bits = dev_priv->hpd_event_bits;
|
||||
dev_priv->hpd_event_bits = 0;
|
||||
|
@ -1193,7 +1199,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
|
|||
msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
list_for_each_entry(connector, &mode_config->connector_list, head) {
|
||||
intel_connector = to_intel_connector(connector);
|
||||
|
@ -1488,7 +1494,6 @@ static void ivybridge_parity_work(struct work_struct *work)
|
|||
u32 error_status, row, bank, subbank;
|
||||
char *parity_event[6];
|
||||
uint32_t misccpctl;
|
||||
unsigned long flags;
|
||||
uint8_t slice = 0;
|
||||
|
||||
/* We must turn off DOP level clock gating to access the L3 registers.
|
||||
|
@ -1547,9 +1552,9 @@ static void ivybridge_parity_work(struct work_struct *work)
|
|||
|
||||
out:
|
||||
WARN_ON(dev_priv->l3_parity.which_slice);
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
}
|
||||
|
@ -2566,7 +2571,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
|||
}
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
uint32_t pipe_iir;
|
||||
uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
|
||||
|
||||
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
|
||||
continue;
|
||||
|
@ -2575,11 +2580,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
|||
if (pipe_iir) {
|
||||
ret = IRQ_HANDLED;
|
||||
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_VBLANK &&
|
||||
intel_pipe_handle_vblank(dev, pipe))
|
||||
intel_check_page_flip(dev, pipe);
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
|
||||
if (IS_GEN9(dev))
|
||||
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
|
||||
else
|
||||
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
|
||||
|
||||
if (flip_done) {
|
||||
intel_prepare_page_flip(dev, pipe);
|
||||
intel_finish_page_flip_plane(dev, pipe);
|
||||
}
|
||||
|
@ -2594,11 +2605,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
|||
pipe_name(pipe));
|
||||
}
|
||||
|
||||
if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
|
||||
|
||||
if (IS_GEN9(dev))
|
||||
fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
else
|
||||
fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
|
||||
if (fault_errors)
|
||||
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
|
||||
pipe_name(pipe),
|
||||
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
|
||||
}
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
|
||||
}
|
||||
|
@ -3444,8 +3460,8 @@ static void gen8_irq_reset(struct drm_device *dev)
|
|||
gen8_gt_irq_reset(dev_priv);
|
||||
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
if (intel_display_power_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
if (intel_display_power_is_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
|
||||
|
||||
GEN5_IRQ_RESET(GEN8_DE_PORT_);
|
||||
|
@ -3457,15 +3473,14 @@ static void gen8_irq_reset(struct drm_device *dev)
|
|||
|
||||
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
|
||||
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
|
||||
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
|
||||
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
static void cherryview_irq_preinstall(struct drm_device *dev)
|
||||
|
@ -3584,7 +3599,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
|
|||
|
||||
static int ironlake_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 display_mask, extra_mask;
|
||||
|
||||
|
@ -3623,9 +3637,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
|
|||
* spinlocking not required here for correctness since interrupt
|
||||
* setup is guaranteed to run in single-threaded context. But we
|
||||
* need it to make the assert_spin_locked happy. */
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -3701,7 +3715,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
|
|||
|
||||
dev_priv->display_irqs_enabled = true;
|
||||
|
||||
if (dev_priv->dev->irq_enabled)
|
||||
if (intel_irqs_enabled(dev_priv))
|
||||
valleyview_display_irqs_install(dev_priv);
|
||||
}
|
||||
|
||||
|
@ -3714,14 +3728,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
|
|||
|
||||
dev_priv->display_irqs_enabled = false;
|
||||
|
||||
if (dev_priv->dev->irq_enabled)
|
||||
if (intel_irqs_enabled(dev_priv))
|
||||
valleyview_display_irqs_uninstall(dev_priv);
|
||||
}
|
||||
|
||||
static int valleyview_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
|
||||
dev_priv->irq_mask = ~0;
|
||||
|
||||
|
@ -3735,10 +3748,10 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
|
|||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display_irqs_enabled)
|
||||
valleyview_display_irqs_install(dev_priv);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
I915_WRITE(VLV_IIR, 0xffffffff);
|
||||
I915_WRITE(VLV_IIR, 0xffffffff);
|
||||
|
@ -3783,18 +3796,26 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
|
|||
|
||||
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
|
||||
GEN8_PIPE_CDCLK_CRC_DONE |
|
||||
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
|
||||
GEN8_PIPE_FIFO_UNDERRUN;
|
||||
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
|
||||
uint32_t de_pipe_enables;
|
||||
int pipe;
|
||||
|
||||
if (IS_GEN9(dev_priv))
|
||||
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
|
||||
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
else
|
||||
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
|
||||
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
|
||||
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
|
||||
GEN8_PIPE_FIFO_UNDERRUN;
|
||||
|
||||
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
|
||||
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
|
||||
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
|
||||
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
if (intel_display_power_enabled(dev_priv,
|
||||
if (intel_display_power_is_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
|
||||
dev_priv->de_irq_mask[pipe],
|
||||
|
@ -3829,7 +3850,6 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
|
|||
I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
|
||||
u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
|
||||
PIPE_CRC_DONE_INTERRUPT_STATUS;
|
||||
unsigned long irqflags;
|
||||
int pipe;
|
||||
|
||||
/*
|
||||
|
@ -3841,11 +3861,11 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
|
|||
for_each_pipe(dev_priv, pipe)
|
||||
I915_WRITE(PIPESTAT(pipe), 0xffff);
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
I915_WRITE(VLV_IIR, 0xffffffff);
|
||||
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
|
||||
|
@ -3872,7 +3892,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
|
|||
static void valleyview_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
int pipe;
|
||||
|
||||
if (!dev_priv)
|
||||
|
@ -3887,10 +3906,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
|
|||
I915_WRITE(PORT_HOTPLUG_EN, 0);
|
||||
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display_irqs_enabled)
|
||||
valleyview_display_irqs_uninstall(dev_priv);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
dev_priv->irq_mask = 0;
|
||||
|
||||
|
@ -3976,7 +3997,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
|
|||
static int i8xx_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
|
||||
I915_WRITE16(EMR,
|
||||
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
|
||||
|
@ -3999,10 +4019,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
|
|||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
|
||||
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -4047,7 +4067,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u16 iir, new_iir;
|
||||
u32 pipe_stats[2];
|
||||
unsigned long irqflags;
|
||||
int pipe;
|
||||
u16 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
|
@ -4063,7 +4082,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
|||
* It doesn't set the bit in iir again, but it still produces
|
||||
* interrupts (for non-MSI).
|
||||
*/
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
|
||||
i915_handle_error(dev, false,
|
||||
"Command parser error, iir 0x%08x",
|
||||
|
@ -4079,7 +4098,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
|||
if (pipe_stats[pipe] & 0x8000ffff)
|
||||
I915_WRITE(reg, pipe_stats[pipe]);
|
||||
}
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
|
||||
I915_WRITE16(IIR, iir & ~flip_mask);
|
||||
new_iir = I915_READ16(IIR); /* Flush posted writes */
|
||||
|
@ -4149,7 +4168,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 enable_mask;
|
||||
unsigned long irqflags;
|
||||
|
||||
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
|
||||
|
||||
|
@ -4187,10 +4205,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
|
|||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
|
||||
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -4234,7 +4252,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
|||
struct drm_device *dev = arg;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
|
||||
unsigned long irqflags;
|
||||
u32 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
|
@ -4250,7 +4267,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
|||
* It doesn't set the bit in iir again, but it still produces
|
||||
* interrupts (for non-MSI).
|
||||
*/
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
|
||||
i915_handle_error(dev, false,
|
||||
"Command parser error, iir 0x%08x",
|
||||
|
@ -4266,7 +4283,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
|||
irq_received = true;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
|
||||
if (!irq_received)
|
||||
break;
|
||||
|
@ -4372,7 +4389,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 enable_mask;
|
||||
u32 error_mask;
|
||||
unsigned long irqflags;
|
||||
|
||||
/* Unmask the interrupts that we always want on. */
|
||||
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
|
||||
|
@ -4393,11 +4409,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
|
|||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
|
||||
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
|
||||
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
/*
|
||||
* Enable some error detection, note the instruction error mask
|
||||
|
@ -4462,7 +4478,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 iir, new_iir;
|
||||
u32 pipe_stats[I915_MAX_PIPES];
|
||||
unsigned long irqflags;
|
||||
int ret = IRQ_NONE, pipe;
|
||||
u32 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
|
@ -4479,7 +4494,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
|||
* It doesn't set the bit in iir again, but it still produces
|
||||
* interrupts (for non-MSI).
|
||||
*/
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock(&dev_priv->irq_lock);
|
||||
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
|
||||
i915_handle_error(dev, false,
|
||||
"Command parser error, iir 0x%08x",
|
||||
|
@ -4497,7 +4512,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
|||
irq_received = true;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
|
||||
if (!irq_received)
|
||||
break;
|
||||
|
@ -4584,19 +4599,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
|
|||
I915_WRITE(IIR, I915_READ(IIR));
|
||||
}
|
||||
|
||||
static void intel_hpd_irq_reenable(struct work_struct *work)
|
||||
static void intel_hpd_irq_reenable_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv),
|
||||
hotplug_reenable_work.work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
unsigned long irqflags;
|
||||
int i;
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
|
||||
struct drm_connector *connector;
|
||||
|
||||
|
@ -4620,14 +4634,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
|
|||
}
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
void intel_irq_init(struct drm_device *dev)
|
||||
/**
|
||||
* intel_irq_init - initializes irq support
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function initializes all the irq support including work items, timers
|
||||
* and all the vtables. It does not setup the interrupt itself though.
|
||||
*/
|
||||
void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
|
||||
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
|
||||
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
|
||||
|
@ -4636,7 +4657,7 @@ void intel_irq_init(struct drm_device *dev)
|
|||
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
|
||||
|
||||
/* Let's track the enabled rps events */
|
||||
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
|
||||
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
|
||||
/* WaGsvRC0ResidencyMethod:vlv */
|
||||
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
|
||||
else
|
||||
|
@ -4646,17 +4667,14 @@ void intel_irq_init(struct drm_device *dev)
|
|||
i915_hangcheck_elapsed,
|
||||
(unsigned long) dev);
|
||||
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
|
||||
intel_hpd_irq_reenable);
|
||||
intel_hpd_irq_reenable_work);
|
||||
|
||||
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
|
||||
|
||||
/* Haven't installed the IRQ handler yet */
|
||||
dev_priv->pm._irqs_disabled = true;
|
||||
|
||||
if (IS_GEN2(dev)) {
|
||||
if (IS_GEN2(dev_priv)) {
|
||||
dev->max_vblank_count = 0;
|
||||
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
|
||||
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
|
||||
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
|
||||
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
|
||||
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
|
||||
} else {
|
||||
|
@ -4669,7 +4687,7 @@ void intel_irq_init(struct drm_device *dev)
|
|||
* Gen2 doesn't have a hardware frame counter and so depends on
|
||||
	 * vblank interrupts to produce sane vblank sequence numbers.
|
||||
*/
|
||||
if (!IS_GEN2(dev))
|
||||
if (!IS_GEN2(dev_priv))
|
||||
dev->vblank_disable_immediate = true;
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
|
@ -4677,7 +4695,7 @@ void intel_irq_init(struct drm_device *dev)
|
|||
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
|
||||
}
|
||||
|
||||
if (IS_CHERRYVIEW(dev)) {
|
||||
if (IS_CHERRYVIEW(dev_priv)) {
|
||||
dev->driver->irq_handler = cherryview_irq_handler;
|
||||
dev->driver->irq_preinstall = cherryview_irq_preinstall;
|
||||
dev->driver->irq_postinstall = cherryview_irq_postinstall;
|
||||
|
@ -4685,7 +4703,7 @@ void intel_irq_init(struct drm_device *dev)
|
|||
dev->driver->enable_vblank = valleyview_enable_vblank;
|
||||
dev->driver->disable_vblank = valleyview_disable_vblank;
|
||||
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
} else if (IS_VALLEYVIEW(dev_priv)) {
|
||||
dev->driver->irq_handler = valleyview_irq_handler;
|
||||
dev->driver->irq_preinstall = valleyview_irq_preinstall;
|
||||
dev->driver->irq_postinstall = valleyview_irq_postinstall;
|
||||
|
@ -4693,7 +4711,7 @@ void intel_irq_init(struct drm_device *dev)
|
|||
dev->driver->enable_vblank = valleyview_enable_vblank;
|
||||
dev->driver->disable_vblank = valleyview_disable_vblank;
|
||||
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
|
||||
} else if (IS_GEN8(dev)) {
|
||||
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
|
||||
dev->driver->irq_handler = gen8_irq_handler;
|
||||
dev->driver->irq_preinstall = gen8_irq_reset;
|
||||
dev->driver->irq_postinstall = gen8_irq_postinstall;
|
||||
|
@ -4710,12 +4728,12 @@ void intel_irq_init(struct drm_device *dev)
|
|||
dev->driver->disable_vblank = ironlake_disable_vblank;
|
||||
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
|
||||
} else {
|
||||
if (INTEL_INFO(dev)->gen == 2) {
|
||||
if (INTEL_INFO(dev_priv)->gen == 2) {
|
||||
dev->driver->irq_preinstall = i8xx_irq_preinstall;
|
||||
dev->driver->irq_postinstall = i8xx_irq_postinstall;
|
||||
dev->driver->irq_handler = i8xx_irq_handler;
|
||||
dev->driver->irq_uninstall = i8xx_irq_uninstall;
|
||||
} else if (INTEL_INFO(dev)->gen == 3) {
|
||||
} else if (INTEL_INFO(dev_priv)->gen == 3) {
|
||||
dev->driver->irq_preinstall = i915_irq_preinstall;
|
||||
dev->driver->irq_postinstall = i915_irq_postinstall;
|
||||
dev->driver->irq_uninstall = i915_irq_uninstall;
|
||||
|
@ -4733,12 +4751,23 @@ void intel_irq_init(struct drm_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
void intel_hpd_init(struct drm_device *dev)
|
||||
/**
|
||||
* intel_hpd_init - initializes and enables hpd support
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function enables the hotplug support. It requires that interrupts have
|
||||
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
|
||||
 * poll requests can run concurrently with other code, so locking rules must be
|
||||
* obeyed.
|
||||
*
|
||||
* This is a separate step from interrupt enabling to simplify the locking rules
|
||||
* in the driver load and resume code.
|
||||
*/
|
||||
void intel_hpd_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct drm_connector *connector;
|
||||
unsigned long irqflags;
|
||||
int i;
|
||||
|
||||
for (i = 1; i < HPD_NUM_PINS; i++) {
|
||||
|
@ -4756,27 +4785,72 @@ void intel_hpd_init(struct drm_device *dev)
|
|||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked checks happy. */
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
/* Disable interrupts so we can allow runtime PM. */
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
|
||||
/**
|
||||
* intel_irq_install - enables the hardware interrupt
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function enables the hardware interrupt handling, but leaves the hotplug
|
||||
* handling still disabled. It is called after intel_irq_init().
|
||||
*
|
||||
* In the driver load and resume code we need working interrupts in a few places
|
||||
* but don't want to deal with the hassle of concurrent probe and hotplug
|
||||
* workers. Hence the split into this two-stage approach.
|
||||
*/
|
||||
int intel_irq_install(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
/*
|
||||
* We enable some interrupt sources in our postinstall hooks, so mark
|
||||
* interrupts as enabled _before_ actually enabling them to avoid
|
||||
* special cases in our ordering checks.
|
||||
*/
|
||||
dev_priv->pm.irqs_enabled = true;
|
||||
|
||||
dev->driver->irq_uninstall(dev);
|
||||
dev_priv->pm._irqs_disabled = true;
|
||||
return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
|
||||
}
|
||||
|
||||
/* Restore interrupts so we can recover from runtime PM. */
|
||||
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
|
||||
/**
|
||||
 * intel_irq_uninstall - finalizes all irq handling
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This stops interrupt and hotplug handling and unregisters and frees all
|
||||
* resources acquired in the init functions.
|
||||
*/
|
||||
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
dev_priv->pm._irqs_disabled = false;
|
||||
dev->driver->irq_preinstall(dev);
|
||||
dev->driver->irq_postinstall(dev);
|
||||
drm_irq_uninstall(dev_priv->dev);
|
||||
intel_hpd_cancel_work(dev_priv);
|
||||
dev_priv->pm.irqs_enabled = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_runtime_pm_disable_interrupts - runtime interrupt disabling
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function is used to disable interrupts at runtime, both in the runtime
|
||||
* pm and the system suspend/resume code.
|
||||
*/
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
|
||||
dev_priv->pm.irqs_enabled = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_runtime_pm_enable_interrupts - runtime interrupt enabling
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function is used to enable interrupts at runtime, both in the runtime
|
||||
* pm and the system suspend/resume code.
|
||||
*/
|
||||
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->pm.irqs_enabled = true;
|
||||
dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
|
||||
dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
|
||||
}
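The kerneldoc added above describes a staged bring-up: intel_irq_init() prepares work items and vtables without touching hardware, intel_irq_install() enables the hardware interrupt, intel_hpd_init() only then lets hotplug/poll work run, and the runtime-pm helpers toggle the same irqs_enabled flag around suspend. The following is a minimal sketch of that ordering under those assumptions; the function names mirror the driver but every body is an illustrative stub, not the i915 implementation.

```c
/* Minimal model of the load/resume ordering documented above. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct i915_sketch {
	bool vtables_ready;   /* set up by init: work items, timers, vfuncs */
	bool irqs_enabled;    /* mirrors dev_priv->pm.irqs_enabled */
	bool hpd_enabled;     /* hotplug/poll workers may now run */
};

static void irq_init(struct i915_sketch *i915)
{
	i915->vtables_ready = true;       /* no hardware touched yet */
}

static int irq_install(struct i915_sketch *i915)
{
	assert(i915->vtables_ready);
	/* Mark enabled _before_ the postinstall hook runs, as the comment
	 * in intel_irq_install() explains, so the ordering checks in the
	 * enable paths see a consistent state. */
	i915->irqs_enabled = true;
	printf("request_irq + preinstall/postinstall\n");
	return 0;
}

static void hpd_init(struct i915_sketch *i915)
{
	assert(i915->irqs_enabled);       /* needs working interrupts */
	i915->hpd_enabled = true;         /* hotplug may race from here on */
}

static void runtime_pm_disable_interrupts(struct i915_sketch *i915)
{
	printf("irq_uninstall hook\n");
	i915->irqs_enabled = false;
}

static void runtime_pm_enable_interrupts(struct i915_sketch *i915)
{
	i915->irqs_enabled = true;
	printf("preinstall + postinstall hooks\n");
}

int main(void)
{
	struct i915_sketch i915 = {0};

	irq_init(&i915);
	irq_install(&i915);
	hpd_init(&i915);

	/* runtime suspend/resume cycle */
	runtime_pm_disable_interrupts(&i915);
	runtime_pm_enable_interrupts(&i915);
	return 0;
}
```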
|
||||
|
|
|
@ -26,8 +26,8 @@
|
|||
#define _I915_REG_H_
|
||||
|
||||
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
|
||||
#define _PLANE(plane, a, b) _PIPE(plane, a, b)
|
||||
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
|
||||
|
||||
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
|
||||
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
|
||||
(pipe) == PIPE_B ? (b) : (c))
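The parameterized register macros shown in this hunk interpolate between the A and B instances of a register: index 0 yields the A address, index 1 the B address, and _PLANE() reuses the same arithmetic for per-plane registers. The small host-side check below demonstrates the arithmetic using the Skylake PLANE_CTL offsets added further down in this patch; it is purely illustrative and only covers planes 1 and 2, the pair the two-register form can express.

```c
/* Host-side check of the two-register interpolation used by _PIPE()/_PLANE(). */
#include <assert.h>
#include <stdio.h>

#define _PIPE(pipe, a, b)	((a) + (pipe)*((b)-(a)))
#define _PLANE(plane, a, b)	_PIPE(plane, a, b)

/* SKL plane control offsets from this patch. */
#define _PLANE_CTL_1_A		0x70180
#define _PLANE_CTL_1_B		0x71180
#define _PLANE_CTL_2_A		0x70280
#define _PLANE_CTL_2_B		0x71280

#define _PLANE_CTL_1(pipe)	_PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
#define _PLANE_CTL_2(pipe)	_PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
#define PLANE_CTL(pipe, plane)	_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))

int main(void)
{
	assert(PLANE_CTL(0, 0) == 0x70180);	/* pipe A, plane 1 */
	assert(PLANE_CTL(1, 0) == 0x71180);	/* pipe B, plane 1 */
	assert(PLANE_CTL(0, 1) == 0x70280);	/* pipe A, plane 2 */

	printf("PLANE_CTL(pipe B, plane 2) = 0x%x\n",
	       (unsigned int)PLANE_CTL(1, 1));	/* prints 0x71280 */
	return 0;
}
```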
|
||||
|
@ -796,6 +796,8 @@ enum punit_power_well {
|
|||
#define _VLV_PCS_DW0_CH1 0x8400
|
||||
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
|
||||
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
|
||||
#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
|
||||
#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
|
||||
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
|
||||
|
||||
#define _VLV_PCS01_DW0_CH0 0x200
|
||||
|
@ -836,12 +838,31 @@ enum punit_power_well {
|
|||
|
||||
#define _VLV_PCS_DW9_CH0 0x8224
|
||||
#define _VLV_PCS_DW9_CH1 0x8424
|
||||
#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
|
||||
#define DPIO_PCS_TX2MARGIN_000 (0<<13)
|
||||
#define DPIO_PCS_TX2MARGIN_101 (1<<13)
|
||||
#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
|
||||
#define DPIO_PCS_TX1MARGIN_000 (0<<10)
|
||||
#define DPIO_PCS_TX1MARGIN_101 (1<<10)
|
||||
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
|
||||
|
||||
#define _VLV_PCS01_DW9_CH0 0x224
|
||||
#define _VLV_PCS23_DW9_CH0 0x424
|
||||
#define _VLV_PCS01_DW9_CH1 0x2624
|
||||
#define _VLV_PCS23_DW9_CH1 0x2824
|
||||
#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
|
||||
#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
|
||||
|
||||
#define _CHV_PCS_DW10_CH0 0x8228
|
||||
#define _CHV_PCS_DW10_CH1 0x8428
|
||||
#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
|
||||
#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
|
||||
#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
|
||||
#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
|
||||
#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
|
||||
#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
|
||||
#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
|
||||
#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
|
||||
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
|
||||
|
||||
#define _VLV_PCS01_DW10_CH0 0x0228
|
||||
|
@ -853,8 +874,18 @@ enum punit_power_well {
|
|||
|
||||
#define _VLV_PCS_DW11_CH0 0x822c
|
||||
#define _VLV_PCS_DW11_CH1 0x842c
|
||||
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
|
||||
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
|
||||
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
|
||||
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
|
||||
|
||||
#define _VLV_PCS01_DW11_CH0 0x022c
|
||||
#define _VLV_PCS23_DW11_CH0 0x042c
|
||||
#define _VLV_PCS01_DW11_CH1 0x262c
|
||||
#define _VLV_PCS23_DW11_CH1 0x282c
|
||||
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
|
||||
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
|
||||
|
||||
#define _VLV_PCS_DW12_CH0 0x8230
|
||||
#define _VLV_PCS_DW12_CH1 0x8430
|
||||
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
|
||||
|
@ -2506,9 +2537,7 @@ enum punit_power_well {
|
|||
|
||||
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
|
||||
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
|
||||
#define EDP_PSR_DPCD_COMMAND 0x80060000
|
||||
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
|
||||
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
|
||||
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
|
||||
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
|
||||
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
|
||||
|
@ -3645,6 +3674,7 @@ enum punit_power_well {
|
|||
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
|
||||
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
|
||||
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
|
||||
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
|
||||
|
||||
/*
|
||||
* Computing GMCH M and N values for the Display Port link
|
||||
|
@ -4510,6 +4540,143 @@ enum punit_power_well {
|
|||
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
|
||||
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
|
||||
|
||||
/* Skylake plane registers */
|
||||
|
||||
#define _PLANE_CTL_1_A 0x70180
|
||||
#define _PLANE_CTL_2_A 0x70280
|
||||
#define _PLANE_CTL_3_A 0x70380
|
||||
#define PLANE_CTL_ENABLE (1 << 31)
|
||||
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
|
||||
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
|
||||
#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
|
||||
#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
|
||||
#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
|
||||
#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
|
||||
#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
|
||||
#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
|
||||
#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
|
||||
#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
|
||||
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
|
||||
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
|
||||
#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
|
||||
#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
|
||||
#define PLANE_CTL_ORDER_BGRX (0 << 20)
|
||||
#define PLANE_CTL_ORDER_RGBX (1 << 20)
|
||||
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
|
||||
#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
|
||||
#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
|
||||
#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
|
||||
#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
|
||||
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
|
||||
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
|
||||
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
|
||||
#define PLANE_CTL_TILED_MASK (0x7 << 10)
|
||||
#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
|
||||
#define PLANE_CTL_TILED_X ( 1 << 10)
|
||||
#define PLANE_CTL_TILED_Y ( 4 << 10)
|
||||
#define PLANE_CTL_TILED_YF ( 5 << 10)
|
||||
#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
|
||||
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
|
||||
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
|
||||
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
|
||||
#define _PLANE_STRIDE_1_A 0x70188
|
||||
#define _PLANE_STRIDE_2_A 0x70288
|
||||
#define _PLANE_STRIDE_3_A 0x70388
|
||||
#define _PLANE_POS_1_A 0x7018c
|
||||
#define _PLANE_POS_2_A 0x7028c
|
||||
#define _PLANE_POS_3_A 0x7038c
|
||||
#define _PLANE_SIZE_1_A 0x70190
|
||||
#define _PLANE_SIZE_2_A 0x70290
|
||||
#define _PLANE_SIZE_3_A 0x70390
|
||||
#define _PLANE_SURF_1_A 0x7019c
|
||||
#define _PLANE_SURF_2_A 0x7029c
|
||||
#define _PLANE_SURF_3_A 0x7039c
|
||||
#define _PLANE_OFFSET_1_A 0x701a4
|
||||
#define _PLANE_OFFSET_2_A 0x702a4
|
||||
#define _PLANE_OFFSET_3_A 0x703a4
|
||||
#define _PLANE_KEYVAL_1_A 0x70194
|
||||
#define _PLANE_KEYVAL_2_A 0x70294
|
||||
#define _PLANE_KEYMSK_1_A 0x70198
|
||||
#define _PLANE_KEYMSK_2_A 0x70298
|
||||
#define _PLANE_KEYMAX_1_A 0x701a0
|
||||
#define _PLANE_KEYMAX_2_A 0x702a0
|
||||
|
||||
#define _PLANE_CTL_1_B 0x71180
|
||||
#define _PLANE_CTL_2_B 0x71280
|
||||
#define _PLANE_CTL_3_B 0x71380
|
||||
#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
|
||||
#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
|
||||
#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
|
||||
#define PLANE_CTL(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
|
||||
|
||||
#define _PLANE_STRIDE_1_B 0x71188
|
||||
#define _PLANE_STRIDE_2_B 0x71288
|
||||
#define _PLANE_STRIDE_3_B 0x71388
|
||||
#define _PLANE_STRIDE_1(pipe) \
|
||||
_PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
|
||||
#define _PLANE_STRIDE_2(pipe) \
|
||||
_PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
|
||||
#define _PLANE_STRIDE_3(pipe) \
|
||||
_PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
|
||||
#define PLANE_STRIDE(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
|
||||
|
||||
#define _PLANE_POS_1_B 0x7118c
|
||||
#define _PLANE_POS_2_B 0x7128c
|
||||
#define _PLANE_POS_3_B 0x7138c
|
||||
#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
|
||||
#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
|
||||
#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
|
||||
#define PLANE_POS(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
|
||||
|
||||
#define _PLANE_SIZE_1_B 0x71190
|
||||
#define _PLANE_SIZE_2_B 0x71290
|
||||
#define _PLANE_SIZE_3_B 0x71390
|
||||
#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
|
||||
#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
|
||||
#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
|
||||
#define PLANE_SIZE(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
|
||||
|
||||
#define _PLANE_SURF_1_B 0x7119c
|
||||
#define _PLANE_SURF_2_B 0x7129c
|
||||
#define _PLANE_SURF_3_B 0x7139c
|
||||
#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
|
||||
#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
|
||||
#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
|
||||
#define PLANE_SURF(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
|
||||
|
||||
#define _PLANE_OFFSET_1_B 0x711a4
|
||||
#define _PLANE_OFFSET_2_B 0x712a4
|
||||
#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
|
||||
#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
|
||||
#define PLANE_OFFSET(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
|
||||
|
||||
#define _PLANE_KEYVAL_1_B 0x71194
|
||||
#define _PLANE_KEYVAL_2_B 0x71294
|
||||
#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
|
||||
#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
|
||||
#define PLANE_KEYVAL(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
|
||||
|
||||
#define _PLANE_KEYMSK_1_B 0x71198
|
||||
#define _PLANE_KEYMSK_2_B 0x71298
|
||||
#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
|
||||
#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
|
||||
#define PLANE_KEYMSK(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
|
||||
|
||||
#define _PLANE_KEYMAX_1_B 0x711a0
|
||||
#define _PLANE_KEYMAX_2_B 0x712a0
|
||||
#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
|
||||
#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
|
||||
#define PLANE_KEYMAX(pipe, plane) \
|
||||
_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
|
||||
|
||||
/* VBIOS regs */
|
||||
#define VGACNTRL 0x71400
|
||||
# define VGA_DISP_DISABLE (1 << 31)
|
||||
|
@ -4746,10 +4913,23 @@ enum punit_power_well {
|
|||
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
|
||||
#define GEN8_PIPE_VSYNC (1 << 1)
|
||||
#define GEN8_PIPE_VBLANK (1 << 0)
|
||||
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
|
||||
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
|
||||
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
|
||||
#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
|
||||
#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
|
||||
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
|
||||
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
|
||||
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
|
||||
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
|
||||
(GEN8_PIPE_CURSOR_FAULT | \
|
||||
GEN8_PIPE_SPRITE_FAULT | \
|
||||
GEN8_PIPE_PRIMARY_FAULT)
|
||||
#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
|
||||
(GEN9_PIPE_CURSOR_FAULT | \
|
||||
GEN9_PIPE_PLANE3_FAULT | \
|
||||
GEN9_PIPE_PLANE2_FAULT | \
|
||||
GEN9_PIPE_PLANE1_FAULT)
|
||||
|
||||
#define GEN8_DE_PORT_ISR 0x44440
|
||||
#define GEN8_DE_PORT_IMR 0x44444
|
||||
|
@ -4839,6 +5019,7 @@ enum punit_power_well {
|
|||
/* GEN8 chicken */
|
||||
#define HDC_CHICKEN0 0x7300
|
||||
#define HDC_FORCE_NON_COHERENT (1<<4)
|
||||
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
|
||||
|
||||
/* WaCatErrorRejectionIssue */
|
||||
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
|
||||
|
@ -5751,6 +5932,9 @@ enum punit_power_well {
|
|||
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
|
||||
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
|
||||
|
||||
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
|
||||
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
|
||||
|
||||
#define GEN8_ROW_CHICKEN 0xe4f0
|
||||
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
|
||||
#define STALL_DOP_GATING_DISABLE (1<<5)
|
||||
|
|
|
@ -46,7 +46,7 @@ struct bdb_header {
|
|||
u16 version; /**< decimal */
|
||||
u16 header_size; /**< in bytes */
|
||||
u16 bdb_size; /**< in bytes */
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/* strictly speaking, this is a "skip" block, but it has interesting info */
|
||||
struct vbios_data {
|
||||
|
@ -252,7 +252,7 @@ union child_device_config {
|
|||
/* This one should also be safe to use anywhere, even without version
|
||||
* checks. */
|
||||
struct common_child_dev_config common;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
struct bdb_general_definitions {
|
||||
/* DDC GPIO */
|
||||
|
@ -888,12 +888,12 @@ struct mipi_pps_data {
|
|||
u16 bl_disable_delay;
|
||||
u16 panel_off_delay;
|
||||
u16 panel_power_cycle_delay;
|
||||
};
|
||||
} __packed;
|
||||
|
||||
struct bdb_mipi_config {
|
||||
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
|
||||
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/* Block 53 contains MIPI sequences as needed by the panel
|
||||
* for enabling it. This block can be variable in size and
|
||||
|
@ -902,7 +902,7 @@ struct bdb_mipi_config {
|
|||
struct bdb_mipi_sequence {
|
||||
u8 version;
|
||||
u8 data[0];
|
||||
};
|
||||
} __packed;
|
||||
|
||||
/* MIPI Sequence Block definitions */
|
||||
enum mipi_seq {
|
||||
|
|
|
@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
|
|||
u32 tmp;
|
||||
|
||||
power_domain = intel_display_port_power_domain(encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
tmp = I915_READ(crt->adpa_reg);
|
||||
|
|
|
@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
|
|||
{ 0x00BEFFFF, 0x00140006 },
|
||||
{ 0x80B2CFFF, 0x001B0002 },
|
||||
{ 0x00FFFFFF, 0x000E000A },
|
||||
{ 0x00D75FFF, 0x00180004 },
|
||||
{ 0x80CB2FFF, 0x001B0002 },
|
||||
{ 0x00DB6FFF, 0x00160005 },
|
||||
{ 0x80C71FFF, 0x001A0002 },
|
||||
{ 0x00F7DFFF, 0x00180004 },
|
||||
{ 0x80D75FFF, 0x001B0002 },
|
||||
};
|
||||
|
@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
|
|||
{ 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
|
||||
};
|
||||
|
||||
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
|
||||
{ 0x00000018, 0x000000a0 },
|
||||
{ 0x00004014, 0x00000098 },
|
||||
{ 0x00006012, 0x00000088 },
|
||||
{ 0x00008010, 0x00000080 },
|
||||
{ 0x00000018, 0x00000098 },
|
||||
{ 0x00004014, 0x00000088 },
|
||||
{ 0x00006012, 0x00000080 },
|
||||
{ 0x00000018, 0x00000088 },
|
||||
{ 0x00004014, 0x00000080 },
|
||||
};
|
||||
|
||||
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
|
||||
/* Idx NT mV T mV db */
|
||||
{ 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
|
||||
{ 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
|
||||
{ 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
|
||||
{ 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
|
||||
{ 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
|
||||
{ 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
|
||||
{ 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
|
||||
{ 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
|
||||
{ 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
|
||||
{ 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
|
||||
};
|
||||
|
||||
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
|
||||
{
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
|
@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
|
|||
const struct ddi_buf_trans *ddi_translations_hdmi;
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
|
||||
if (IS_BROADWELL(dev)) {
|
||||
if (IS_SKYLAKE(dev)) {
|
||||
ddi_translations_fdi = NULL;
|
||||
ddi_translations_dp = skl_ddi_translations_dp;
|
||||
ddi_translations_edp = skl_ddi_translations_dp;
|
||||
ddi_translations_hdmi = skl_ddi_translations_hdmi;
|
||||
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
|
||||
hdmi_800mV_0dB = 7;
|
||||
} else if (IS_BROADWELL(dev)) {
|
||||
ddi_translations_fdi = bdw_ddi_translations_fdi;
|
||||
ddi_translations_dp = bdw_ddi_translations_dp;
|
||||
ddi_translations_edp = bdw_ddi_translations_edp;
|
||||
|
@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
|
|||
ddi_translations = ddi_translations_dp;
|
||||
break;
|
||||
case PORT_E:
|
||||
ddi_translations = ddi_translations_fdi;
|
||||
if (ddi_translations_fdi)
|
||||
ddi_translations = ddi_translations_fdi;
|
||||
else
|
||||
ddi_translations = ddi_translations_dp;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
|
@ -962,7 +998,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
|
|||
uint32_t tmp;
|
||||
|
||||
power_domain = intel_display_port_power_domain(intel_encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
|
||||
|
@ -1008,7 +1044,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
|
|||
int i;
|
||||
|
||||
power_domain = intel_display_port_power_domain(encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
tmp = I915_READ(DDI_BUF_CTL(port));
|
||||
|
@ -1296,7 +1332,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
|
|||
{
|
||||
uint32_t val;
|
||||
|
||||
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
|
||||
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
|
||||
return false;
|
||||
|
||||
val = I915_READ(WRPLL_CTL(pll->id));
|
||||
|
@ -1486,7 +1522,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
|
|||
break;
|
||||
}
|
||||
|
||||
if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
|
||||
if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
|
||||
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
|
||||
if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
|
||||
pipe_config->has_audio = true;
|
||||
|
|
File diff suppressed because it is too large
|
@ -225,7 +225,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
|
|||
}
|
||||
|
||||
static uint32_t
|
||||
pack_aux(uint8_t *src, int src_bytes)
|
||||
pack_aux(const uint8_t *src, int src_bytes)
|
||||
{
|
||||
int i;
|
||||
uint32_t v = 0;
|
||||
|
@ -661,6 +661,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
|||
return index ? 0 : 100;
|
||||
}
|
||||
|
||||
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
|
||||
{
|
||||
/*
|
||||
* SKL doesn't need us to program the AUX clock divider (Hardware will
|
||||
* derive the clock from CDCLK automatically). We still implement the
|
||||
* get_aux_clock_divider vfunc to plug-in into the existing code.
|
||||
*/
|
||||
return index ? 0 : 1;
|
||||
}
|
||||
|
||||
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
|
||||
bool has_aux_irq,
|
||||
int send_bytes,
|
||||
|
@ -691,9 +701,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
|
|||
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
|
||||
}
|
||||
|
||||
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
|
||||
bool has_aux_irq,
|
||||
int send_bytes,
|
||||
uint32_t unused)
|
||||
{
|
||||
return DP_AUX_CH_CTL_SEND_BUSY |
|
||||
DP_AUX_CH_CTL_DONE |
|
||||
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
|
||||
DP_AUX_CH_CTL_TIME_OUT_ERROR |
|
||||
DP_AUX_CH_CTL_TIME_OUT_1600us |
|
||||
DP_AUX_CH_CTL_RECEIVE_ERROR |
|
||||
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
|
||||
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
|
||||
}
|
||||
|
||||
static int
|
||||
intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
uint8_t *send, int send_bytes,
|
||||
const uint8_t *send, int send_bytes,
|
||||
uint8_t *recv, int recv_size)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
|
@ -925,7 +950,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
|
|||
BUG();
|
||||
}
|
||||
|
||||
if (!HAS_DDI(dev))
|
||||
/*
|
||||
* The AUX_CTL register is usually DP_CTL + 0x10.
|
||||
*
|
||||
* On Haswell and Broadwell though:
|
||||
* - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
|
||||
* - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
|
||||
*
|
||||
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
|
||||
*/
|
||||
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
|
||||
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
|
||||
|
||||
intel_dp->aux.name = name;
|
||||
|
@ -1819,7 +1853,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
|
|||
u32 tmp;
|
||||
|
||||
power_domain = intel_display_port_power_domain(encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
tmp = I915_READ(intel_dp->output_reg);
|
||||
|
@ -1995,10 +2029,8 @@ static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
|
|||
POSTING_READ(ctl_reg);
|
||||
}
|
||||
|
||||
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
|
||||
static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct edp_vsc_psr psr_vsc;
|
||||
|
||||
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
|
||||
|
@ -2008,10 +2040,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
|
|||
psr_vsc.sdp_header.HB2 = 0x2;
|
||||
psr_vsc.sdp_header.HB3 = 0x8;
|
||||
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
|
||||
|
||||
/* Avoid continuous PSR exit by masking memup and hpd */
|
||||
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
|
||||
}
|
||||
|
||||
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
|
||||
|
@ -2021,8 +2049,17 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t aux_clock_divider;
|
||||
int precharge = 0x3;
|
||||
int msg_size = 5; /* Header(4) + Message(1) */
|
||||
bool only_standby = false;
|
||||
static const uint8_t aux_msg[] = {
|
||||
[0] = DP_AUX_NATIVE_WRITE << 4,
|
||||
[1] = DP_SET_POWER >> 8,
|
||||
[2] = DP_SET_POWER & 0xff,
|
||||
[3] = 1 - 1,
|
||||
[4] = DP_SET_POWER_D0,
|
||||
};
|
||||
int i;
|
||||
|
||||
BUILD_BUG_ON(sizeof(aux_msg) > 20);
|
||||
|
||||
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
|
||||
|
||||
|
@ -2038,11 +2075,13 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
|
|||
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
|
||||
|
||||
/* Setup AUX registers */
|
||||
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
|
||||
I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
|
||||
for (i = 0; i < sizeof(aux_msg); i += 4)
|
||||
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
|
||||
pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
|
||||
|
||||
I915_WRITE(EDP_PSR_AUX_CTL(dev),
|
||||
DP_AUX_CH_CTL_TIME_OUT_400us |
|
||||
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
|
||||
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
|
||||
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
|
||||
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
|
||||
}
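This hunk replaces the magic EDP_PSR_DPCD_COMMAND / EDP_PSR_DPCD_NORMAL_OPERATION values with an explicit 5-byte native AUX write (DP_SET_POWER = D0) packed into the AUX data registers with the now-const pack_aux(). The body of pack_aux() is not shown in full in this diff, so the MSB-first packing in the sketch below is an inference; it is supported by the fact that it reproduces exactly the two constants being removed (0x80060000 and 1<<24). Everything else (opcode, DPCD address, length byte) is taken from the aux_msg[] initializer above.

```c
/* Sketch of how the 5-byte PSR AUX message ends up in EDP_PSR_AUX_DATAx. */
#include <stdint.h>
#include <stdio.h>

#define DP_AUX_NATIVE_WRITE	0x8
#define DP_SET_POWER		0x600	/* DPCD address */
#define DP_SET_POWER_D0		0x1

/* Pack up to four message bytes into one 32-bit data-register word,
 * most significant byte first (assumed ordering, see note above). */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

int main(void)
{
	/* Header(4) + one payload byte, mirroring aux_msg[] in the patch. */
	const uint8_t aux_msg[] = {
		DP_AUX_NATIVE_WRITE << 4,	/* request type | addr 19:16 (0) */
		DP_SET_POWER >> 8,		/* address 15:8 */
		DP_SET_POWER & 0xff,		/* address 7:0 */
		1 - 1,				/* length - 1 */
		DP_SET_POWER_D0,		/* payload: wake the sink */
	};
	unsigned int i;

	/* The driver loops in steps of 4, one AUX_DATA register per word:
	 * prints 0x80060000 and 0x01000000, the removed magic constants. */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		printf("EDP_PSR_AUX_DATA%u = 0x%08x\n", i / 4 + 1,
		       pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
	return 0;
}
```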
|
||||
|
@ -2131,10 +2170,7 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
|
|||
WARN_ON(dev_priv->psr.active);
|
||||
lockdep_assert_held(&dev_priv->psr.lock);
|
||||
|
||||
/* Enable PSR on the panel */
|
||||
intel_edp_psr_enable_sink(intel_dp);
|
||||
|
||||
/* Enable PSR on the host */
|
||||
/* Enable/Re-enable PSR on the host */
|
||||
intel_edp_psr_enable_source(intel_dp);
|
||||
|
||||
dev_priv->psr.active = true;
|
||||
|
@ -2158,17 +2194,25 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
|
|||
mutex_lock(&dev_priv->psr.lock);
|
||||
if (dev_priv->psr.enabled) {
|
||||
DRM_DEBUG_KMS("PSR already in use\n");
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
return;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (!intel_edp_psr_match_conditions(intel_dp))
|
||||
goto unlock;
|
||||
|
||||
dev_priv->psr.busy_frontbuffer_bits = 0;
|
||||
|
||||
/* Setup PSR once */
|
||||
intel_edp_psr_setup(intel_dp);
|
||||
intel_edp_psr_setup_vsc(intel_dp);
|
||||
|
||||
if (intel_edp_psr_match_conditions(intel_dp))
|
||||
dev_priv->psr.enabled = intel_dp;
|
||||
/* Avoid continuous PSR exit by masking memup and hpd */
|
||||
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
|
||||
|
||||
/* Enable PSR on the panel */
|
||||
intel_edp_psr_enable_sink(intel_dp);
|
||||
|
||||
dev_priv->psr.enabled = intel_dp;
|
||||
unlock:
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
|
@ -2209,6 +2253,17 @@ static void intel_edp_psr_work(struct work_struct *work)
|
|||
container_of(work, typeof(*dev_priv), psr.work.work);
|
||||
struct intel_dp *intel_dp = dev_priv->psr.enabled;
|
||||
|
||||
/* We have to make sure PSR is ready for re-enable
|
||||
	 * otherwise it stays disabled until the next full enable/disable cycle.
|
||||
* PSR might take some time to get fully disabled
|
||||
* and be ready for re-enable.
|
||||
*/
|
||||
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
|
||||
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
|
||||
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->psr.lock);
|
||||
intel_dp = dev_priv->psr.enabled;
|
||||
|
||||
|
@ -2680,6 +2735,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
|
|||
|
||||
mutex_lock(&dev_priv->dpio_lock);
|
||||
|
||||
/* allow hardware to manage TX FIFO reset source */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
|
||||
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
|
||||
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
|
||||
|
||||
/* Deassert soft data lane reset*/
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
|
||||
val |= CHV_PCS_REQ_SOFTRESET_EN;
|
||||
|
@ -2836,7 +2900,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
|
|||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
enum port port = dp_to_dig_port(intel_dp)->port;
|
||||
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
if (INTEL_INFO(dev)->gen >= 9)
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
else if (IS_VALLEYVIEW(dev))
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
else if (IS_GEN7(dev) && port == PORT_A)
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
|
@ -2852,7 +2918,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
|
|||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
enum port port = dp_to_dig_port(intel_dp)->port;
|
||||
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_3;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_2;
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_1;
|
||||
default:
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_0;
|
||||
}
|
||||
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_3;
|
||||
|
@ -3088,12 +3165,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
|
|||
/* Clear calc init */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
|
||||
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
|
||||
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
|
||||
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
|
||||
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
|
||||
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
|
||||
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
|
||||
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
|
||||
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
|
||||
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
|
||||
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
|
||||
|
||||
/* Program swing deemph */
|
||||
for (i = 0; i < 4; i++) {
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
|
||||
|
@ -3334,7 +3425,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
|||
uint32_t signal_levels, mask;
|
||||
uint8_t train_set = intel_dp->train_set[0];
|
||||
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
|
||||
signal_levels = intel_hsw_signal_levels(train_set);
|
||||
mask = DDI_BUF_EMP_MASK;
|
||||
} else if (IS_CHERRYVIEW(dev)) {
|
||||
|
@ -3801,26 +3892,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
|
|||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(intel_dig_port->base.base.crtc);
|
||||
u8 buf[1];
|
||||
u8 buf;
|
||||
int test_crc_count;
|
||||
int attempts = 6;
|
||||
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
|
||||
return -EIO;
|
||||
|
||||
if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
|
||||
if (!(buf & DP_TEST_CRC_SUPPORTED))
|
||||
return -ENOTTY;
|
||||
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
|
||||
DP_TEST_SINK_START) < 0)
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
|
||||
return -EIO;
|
||||
|
||||
/* Wait 2 vblanks to be sure we will have the correct CRC value */
|
||||
intel_wait_for_vblank(dev, intel_crtc->pipe);
|
||||
intel_wait_for_vblank(dev, intel_crtc->pipe);
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
|
||||
buf | DP_TEST_SINK_START) < 0)
|
||||
return -EIO;
|
||||
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
|
||||
return -EIO;
|
||||
test_crc_count = buf & DP_TEST_COUNT_MASK;
|
||||
|
||||
do {
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux,
|
||||
DP_TEST_SINK_MISC, &buf) < 0)
|
||||
return -EIO;
|
||||
intel_wait_for_vblank(dev, intel_crtc->pipe);
|
||||
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
|
||||
|
||||
if (attempts == 0) {
|
||||
DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
|
||||
return -EIO;
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
|
||||
return -EIO;
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
|
||||
buf & ~DP_TEST_SINK_START) < 0)
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -5057,7 +5170,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
|||
intel_dp->pps_pipe = INVALID_PIPE;
|
||||
|
||||
/* intel_dp vfuncs */
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
if (INTEL_INFO(dev)->gen >= 9)
|
||||
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
|
||||
else if (IS_VALLEYVIEW(dev))
|
||||
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
|
||||
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
|
||||
|
@ -5066,7 +5181,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
|||
else
|
||||
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
|
||||
|
||||
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
|
||||
if (INTEL_INFO(dev)->gen >= 9)
|
||||
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
|
||||
else
|
||||
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
|
||||
|
||||
/* Preserve the current hw state. */
|
||||
intel_dp->DP = I915_READ(intel_dp->output_reg);
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_dp_mst_helper.h>
|
||||
#include <drm/drm_rect.h>
|
||||
|
||||
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
|
||||
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
|
||||
|
@ -240,6 +241,17 @@ typedef struct dpll {
|
|||
int p;
|
||||
} intel_clock_t;
|
||||
|
||||
struct intel_plane_state {
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_rect src;
|
||||
struct drm_rect dst;
|
||||
struct drm_rect clip;
|
||||
struct drm_rect orig_src;
|
||||
struct drm_rect orig_dst;
|
||||
bool visible;
|
||||
};
|
||||
|
||||
struct intel_plane_config {
|
||||
bool tiled;
|
||||
int size;
|
||||
|
@ -734,6 +746,14 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
|
|||
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
|
||||
}
|
||||
|
||||
/*
|
||||
 * Returns the number of planes for this pipe, i.e. the number of sprites + 1
|
||||
 * (the primary plane). The cursor plane is not counted.
|
||||
*/
|
||||
static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
|
||||
{
|
||||
return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
|
||||
}
|
||||
|
||||
/* i915_irq.c */
|
||||
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
|
||||
|
@ -747,15 +767,15 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
|||
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
|
||||
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
|
||||
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/*
|
||||
* We only use drm_irq_uninstall() at unload and VT switch, so
|
||||
* this is the only thing we need to check.
|
||||
*/
|
||||
return !dev_priv->pm._irqs_disabled;
|
||||
return dev_priv->pm.irqs_enabled;
|
||||
}
|
||||
|
||||
int intel_get_crtc_scanline(struct intel_crtc *crtc);
|
||||
|
@ -792,11 +812,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
|
|||
struct intel_crtc_config *pipe_config);
|
||||
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
|
||||
|
||||
/* intel_display.c */
|
||||
const char *intel_output_name(int output);
|
||||
bool intel_has_pending_fb_unpin(struct drm_device *dev);
|
||||
int intel_pch_rawclk(struct drm_device *dev);
|
||||
void intel_mark_busy(struct drm_device *dev);
|
||||
/* intel_frontbuffer.c */
|
||||
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *ring);
|
||||
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
||||
|
@ -806,7 +822,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
|||
void intel_frontbuffer_flush(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits);
|
||||
/**
|
||||
* intel_frontbuffer_flip - prepare frontbuffer flip
|
||||
* intel_frontbuffer_flip - synchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
|
@ -824,6 +840,13 @@ void intel_frontbuffer_flip(struct drm_device *dev,
|
|||
}
|
||||
|
||||
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
|
||||
|
||||
|
||||
/* intel_display.c */
|
||||
const char *intel_output_name(int output);
|
||||
bool intel_has_pending_fb_unpin(struct drm_device *dev);
|
||||
int intel_pch_rawclk(struct drm_device *dev);
|
||||
void intel_mark_busy(struct drm_device *dev);
|
||||
void intel_mark_idle(struct drm_device *dev);
|
||||
void intel_crtc_restore_mode(struct drm_crtc *crtc);
|
||||
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
|
||||
|
@ -844,7 +867,11 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
|
|||
struct drm_file *file_priv);
|
||||
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe);
|
||||
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
|
||||
static inline void
|
||||
intel_wait_for_vblank(struct drm_device *dev, int pipe)
|
||||
{
|
||||
drm_wait_one_vblank(dev, pipe);
|
||||
}
|
||||
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
|
||||
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *dport);
|
||||
|
@ -878,6 +905,8 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
|
|||
void intel_put_shared_dpll(struct intel_crtc *crtc);
|
||||
|
||||
/* modesetting asserts */
|
||||
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe);
|
||||
void assert_pll(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state);
|
||||
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
|
||||
|
@ -908,7 +937,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
|
|||
bool intel_crtc_active(struct drm_crtc *crtc);
|
||||
void hsw_enable_ips(struct intel_crtc *crtc);
|
||||
void hsw_disable_ips(struct intel_crtc *crtc);
|
||||
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
|
||||
enum intel_display_power_domain
|
||||
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
|
||||
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
|
||||
|
@ -1055,6 +1083,28 @@ extern struct drm_display_mode *intel_find_panel_downclock(
|
|||
struct drm_display_mode *fixed_mode,
|
||||
struct drm_connector *connector);
|
||||
|
||||
/* intel_runtime_pm.c */
|
||||
int intel_power_domains_init(struct drm_i915_private *);
|
||||
void intel_power_domains_fini(struct drm_i915_private *);
|
||||
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
|
||||
|
||||
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_display_power_get(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_display_power_put(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
|
||||
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
|
||||
|
||||
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
|
||||
|
||||
/* intel_pm.c */
|
||||
void intel_init_clock_gating(struct drm_device *dev);
|
||||
void intel_suspend_hw(struct drm_device *dev);
|
||||
|
@ -1072,17 +1122,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
|
|||
void intel_update_fbc(struct drm_device *dev);
|
||||
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
|
||||
void intel_gpu_ips_teardown(void);
|
||||
int intel_power_domains_init(struct drm_i915_private *);
|
||||
void intel_power_domains_remove(struct drm_i915_private *);
|
||||
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_display_power_get(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_display_power_put(struct drm_i915_private *dev_priv,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
|
||||
void intel_init_gt_powersave(struct drm_device *dev);
|
||||
void intel_cleanup_gt_powersave(struct drm_device *dev);
|
||||
void intel_enable_gt_powersave(struct drm_device *dev);
|
||||
|
@ -1093,13 +1132,6 @@ void ironlake_teardown_rc6(struct drm_device *dev);
|
|||
void gen6_update_ring_freq(struct drm_device *dev);
|
||||
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_boost(struct drm_i915_private *dev_priv);
|
||||
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
|
||||
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
|
||||
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
|
||||
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
|
||||
void ilk_wm_get_hw_state(struct drm_device *dev);
|
||||
|
||||
|
||||
|
|
|
@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
|
|||
DRM_DEBUG_KMS("\n");
|
||||
|
||||
power_domain = intel_display_port_power_domain(encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
/* XXX: this only works for one DSI output */
|
||||
|
|
|
@ -0,0 +1,279 @@
|
|||
/*
|
||||
* Copyright © 2014 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: frontbuffer tracking
|
||||
*
|
||||
* Many features require us to track changes to the currently active
|
||||
* frontbuffer, especially rendering targeted at the frontbuffer.
|
||||
*
|
||||
* To be able to do so GEM tracks frontbuffers using a bitmask for all possible
|
||||
 * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
|
||||
* then called when the contents of the frontbuffer are invalidated, when
|
||||
* frontbuffer rendering has stopped again to flush out all the changes and when
|
||||
* the frontbuffer is exchanged with a flip. Subsystems interested in
|
||||
* frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
|
||||
* into the relevant places and filter for the frontbuffer slots that they are
|
||||
 * interested in.
|
||||
*
|
||||
* On a high level there are two types of powersaving features. The first one
|
||||
* work like a special cache (FBC and PSR) and are interested when they should
|
||||
* stop caching and when to restart caching. This is done by placing callbacks
|
||||
* into the invalidate and the flush functions: At invalidate the caching must
|
||||
* be stopped and at flush time it can be restarted. And maybe they need to know
|
||||
* when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
|
||||
* and flush on its own) which can be achieved with placing callbacks into the
|
||||
* flip functions.
|
||||
*
|
||||
* The other type of display power saving feature only cares about busyness
|
||||
* (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
|
||||
* busyness. There is no direct way to detect idleness. Instead an idle timer
|
||||
* work delayed work should be started from the flush and flip functions and
|
||||
* cancelled as soon as busyness is detected.
|
||||
*
|
||||
* Note that there's also an older frontbuffer activity tracking scheme which
|
||||
* just tracks general activity. This is done by the various mark_busy and
|
||||
* mark_idle functions. For display power management features using these
|
||||
* functions is deprecated and should be avoided.
|
||||
*/
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

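/*
 * Illustrative sketch, not part of the original patch: a minimal consumer of
 * the busyness-only scheme described in the DOC comment above, loosely
 * modelled on a DRRS-style feature. Everything prefixed with demo_ is
 * hypothetical; only the standard kernel workqueue calls are real. The two
 * hooks would be called from the invalidate respectively flush/flip functions
 * below, after filtering for the frontbuffer bits the feature cares about.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_IDLE_TIMEOUT_MS 1000	/* made-up idle threshold */

struct demo_drrs {
	struct delayed_work idle_work;	/* acts as the idle timer */
};

static void demo_drrs_idle_work_fn(struct work_struct *work)
{
	/* No frontbuffer activity for DEMO_IDLE_TIMEOUT_MS: this is where the
	 * refresh rate would be dropped into the low power mode. */
}

static void demo_drrs_init(struct demo_drrs *drrs)
{
	INIT_DELAYED_WORK(&drrs->idle_work, demo_drrs_idle_work_fn);
}

static void demo_drrs_invalidate(struct demo_drrs *drrs)
{
	/* Rendering started: stay at the full refresh rate and make sure a
	 * pending idle timer doesn't fire underneath us. */
	cancel_delayed_work(&drrs->idle_work);
}

static void demo_drrs_flush_or_flip(struct demo_drrs *drrs)
{
	/* The screen was updated: (re)arm the idle timer. */
	mod_delayed_work(system_wq, &drrs->idle_work,
			 msecs_to_jiffies(DEMO_IDLE_TIMEOUT_MS));
}
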
static void intel_increase_pllclock(struct drm_device *dev,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int dpll_reg = DPLL(pipe);
|
||||
int dpll;
|
||||
|
||||
if (!HAS_GMCH_DISPLAY(dev))
|
||||
return;
|
||||
|
||||
if (!dev_priv->lvds_downclock_avail)
|
||||
return;
|
||||
|
||||
dpll = I915_READ(dpll_reg);
|
||||
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
|
||||
DRM_DEBUG_DRIVER("upclocking LVDS\n");
|
||||
|
||||
assert_panel_unlocked(dev_priv, pipe);
|
||||
|
||||
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
|
||||
I915_WRITE(dpll_reg, dpll);
|
||||
intel_wait_for_vblank(dev, pipe);
|
||||
|
||||
dpll = I915_READ(dpll_reg);
|
||||
if (dpll & DISPLAY_RATE_SELECT_FPA1)
|
||||
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_mark_fb_busy - mark given planes as busy
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: bits for the affected planes
|
||||
* @ring: optional ring for asynchronous commands
|
||||
*
|
||||
* This function gets called every time the screen contents change. It can be
|
||||
* used to keep e.g. the update rate at the nominal refresh rate with DRRS.
|
||||
*/
|
||||
static void intel_mark_fb_busy(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits,
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe pipe;
|
||||
|
||||
if (!i915.powersave)
|
||||
return;
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
|
||||
continue;
|
||||
|
||||
intel_increase_pllclock(dev, pipe);
|
||||
if (ring && intel_fbc_enabled(dev))
|
||||
ring->fbc_dirty = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_fb_obj_invalidate - invalidate frontbuffer object
|
||||
* @obj: GEM object to invalidate
|
||||
* @ring: set for asynchronous rendering
|
||||
*
|
||||
* This function gets called every time rendering on the given object starts and
|
||||
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
|
||||
* be invalidated. If @ring is non-NULL any subsequent flush will be delayed
|
||||
* until the rendering completes or a flip on this frontbuffer plane is
|
||||
* scheduled.
|
||||
*/
|
||||
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
if (!obj->frontbuffer_bits)
|
||||
return;
|
||||
|
||||
if (ring) {
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.busy_bits
|
||||
|= obj->frontbuffer_bits;
|
||||
dev_priv->fb_tracking.flip_bits
|
||||
&= ~obj->frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
|
||||
|
||||
intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flush - flush frontbuffer
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called every time rendering on the given planes has
|
||||
* completed and frontbuffer caching can be started again. Flushes will get
|
||||
* delayed if they're blocked by some outstanding asynchronous rendering.
|
||||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flush(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* Delay flushing when rings are still busy. */
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
|
||||
|
||||
intel_edp_psr_flush(dev, frontbuffer_bits);
|
||||
|
||||
/*
|
||||
* FIXME: Unconditional fbc flushing here is a rather gross hack and
|
||||
* needs to be reworked into a proper frontbuffer tracking scheme like
|
||||
* psr employs.
|
||||
*/
|
||||
if (dev_priv->fbc.need_sw_cache_clean) {
|
||||
dev_priv->fbc.need_sw_cache_clean = false;
|
||||
bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_fb_obj_flush - flush frontbuffer object
|
||||
* @obj: GEM object to flush
|
||||
* @retire: set when retiring asynchronous rendering
|
||||
*
|
||||
* This function gets called every time rendering on the given object has
|
||||
* completed and frontbuffer caching can be started again. If @retire is true
|
||||
* then any delayed flushes will be unblocked.
|
||||
*/
|
||||
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
||||
bool retire)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned frontbuffer_bits;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
if (!obj->frontbuffer_bits)
|
||||
return;
|
||||
|
||||
frontbuffer_bits = obj->frontbuffer_bits;
|
||||
|
||||
if (retire) {
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Filter out new bits since rendering started. */
|
||||
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
|
||||
|
||||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits);
|
||||
}
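
/*
 * Illustrative sketch, not part of the original patch: how a caller pairs the
 * two object-level hooks above for direct (CPU/GTT) frontbuffer rendering.
 * demo_cpu_frontbuffer_write() is hypothetical; a command-stream path would
 * instead pass its ring to intel_fb_obj_invalidate() and call
 * intel_fb_obj_flush(obj, true) when the request retires.
 */
static void demo_cpu_frontbuffer_write(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Synchronous rendering starts: PSR/FBC/DRRS must stop caching now. */
	intel_fb_obj_invalidate(obj, NULL);
	mutex_unlock(&dev->struct_mutex);

	/* ... write to the buffer through the CPU or GTT mapping here ... */

	mutex_lock(&dev->struct_mutex);
	/* Rendering has finished: let the caching features resume. */
	intel_fb_obj_flush(obj, false);
	mutex_unlock(&dev->struct_mutex);
}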
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called after scheduling a flip on @obj. The actual
|
||||
* frontbuffer flushing will be delayed until completion is signalled with
|
||||
* intel_frontbuffer_flip_complete. If an invalidate happens in between, this
|
||||
* flush will be cancelled.
|
||||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
|
||||
/* Remove stale busy bits due to the old buffer. */
|
||||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called after the flip has been latched and will complete
|
||||
* on the next vblank. It will execute the flush if it hasn't been cancelled yet.
|
||||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Mask any cancelled flips. */
|
||||
frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
|
||||
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits);
|
||||
}
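
/*
 * Illustrative sketch, not part of the original patch: the call sequence
 * expected around an asynchronous flip, using the two helpers above. The
 * demo_* functions are hypothetical stand-ins for a driver's flip submission
 * path and its flip-done interrupt handler; only the intel_frontbuffer_*
 * calls and INTEL_FRONTBUFFER_SPRITE() are taken from the code above.
 */
static void demo_schedule_sprite_flip(struct drm_device *dev, enum pipe pipe)
{
	/* Before the flip is queued to the hardware: record the pending flip
	 * so that flushes for this plane are held back until it completes. */
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_SPRITE(pipe));

	/* ... queue the flip to the hardware here ... */
}

static void demo_sprite_flip_done_irq(struct drm_device *dev, enum pipe pipe)
{
	/* The new buffer has been latched and shows on the next vblank: this
	 * flushes, unless an invalidate cancelled the flip bits meanwhile. */
	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
}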
|
|
@ -690,7 +690,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
|
|||
u32 tmp;
|
||||
|
||||
power_domain = intel_display_port_power_domain(encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
tmp = I915_READ(intel_hdmi->hdmi_reg);
|
||||
|
@ -1405,6 +1405,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
|||
|
||||
mutex_lock(&dev_priv->dpio_lock);
|
||||
|
||||
/* allow hardware to manage TX FIFO reset source */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
|
||||
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
|
||||
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
|
||||
|
||||
/* Deassert soft data lane reset */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
|
||||
val |= CHV_PCS_REQ_SOFTRESET_EN;
|
||||
|
@ -1441,12 +1450,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
|||
/* Clear calc init */
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
|
||||
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
|
||||
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
|
||||
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
|
||||
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
|
||||
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
|
||||
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
|
||||
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
|
||||
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
|
||||
|
||||
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
|
||||
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
|
||||
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
|
||||
|
||||
/* FIXME: Program the support xxx V-dB */
|
||||
/* Use 800mV-0dB */
|
||||
for (i = 0; i < 4; i++) {
|
||||
|
|
|
@ -1063,7 +1063,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
|
|
|
@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
|
|||
u32 tmp;
|
||||
|
||||
power_domain = intel_display_port_power_domain(encoder);
|
||||
if (!intel_display_power_enabled(dev_priv, power_domain))
|
||||
if (!intel_display_power_is_enabled(dev_priv, power_domain))
|
||||
return false;
|
||||
|
||||
tmp = I915_READ(lvds_encoder->reg);
|
||||
|
|
|
@ -537,14 +537,13 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
|
|||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 val;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
|
||||
val = dev_priv->display.get_backlight(connector);
|
||||
val = intel_panel_compute_brightness(connector, val);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
|
||||
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
|
||||
return val;
|
||||
|
@ -628,12 +627,11 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
|
|||
struct intel_panel *panel = &connector->panel;
|
||||
enum pipe pipe = intel_get_pipe_from_connector(connector);
|
||||
u32 hw_level;
|
||||
unsigned long flags;
|
||||
|
||||
if (!panel->backlight.present || pipe == INVALID_PIPE)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
|
||||
WARN_ON(panel->backlight.max == 0);
|
||||
|
||||
|
@ -643,7 +641,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
|
|||
if (panel->backlight.enabled)
|
||||
intel_panel_actually_set_backlight(connector, hw_level);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
}
|
||||
|
||||
/* set backlight brightness to level in range [0..max], assuming hw min is
|
||||
|
@ -657,12 +655,11 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
|
|||
struct intel_panel *panel = &connector->panel;
|
||||
enum pipe pipe = intel_get_pipe_from_connector(connector);
|
||||
u32 hw_level;
|
||||
unsigned long flags;
|
||||
|
||||
if (!panel->backlight.present || pipe == INVALID_PIPE)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
|
||||
WARN_ON(panel->backlight.max == 0);
|
||||
|
||||
|
@ -678,7 +675,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
|
|||
if (panel->backlight.enabled)
|
||||
intel_panel_actually_set_backlight(connector, hw_level);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
}
|
||||
|
||||
static void pch_disable_backlight(struct intel_connector *connector)
|
||||
|
@ -732,7 +729,6 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
enum pipe pipe = intel_get_pipe_from_connector(connector);
|
||||
unsigned long flags;
|
||||
|
||||
if (!panel->backlight.present || pipe == INVALID_PIPE)
|
||||
return;
|
||||
|
@ -748,14 +744,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
|
|||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
|
||||
if (panel->backlight.device)
|
||||
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
|
||||
panel->backlight.enabled = false;
|
||||
dev_priv->display.disable_backlight(connector);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
}
|
||||
|
||||
static void bdw_enable_backlight(struct intel_connector *connector)
|
||||
|
@ -936,14 +932,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
enum pipe pipe = intel_get_pipe_from_connector(connector);
|
||||
unsigned long flags;
|
||||
|
||||
if (!panel->backlight.present || pipe == INVALID_PIPE)
|
||||
return;
|
||||
|
||||
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
|
||||
WARN_ON(panel->backlight.max == 0);
|
||||
|
||||
|
@ -961,7 +956,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
|
|||
if (panel->backlight.device)
|
||||
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
|
||||
|
@ -1266,7 +1261,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
struct intel_panel *panel = &intel_connector->panel;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (!dev_priv->vbt.backlight.present) {
|
||||
|
@ -1279,9 +1273,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
|
|||
}
|
||||
|
||||
/* set level and max in panel struct */
|
||||
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
ret = dev_priv->display.setup_backlight(intel_connector);
|
||||
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
|
||||
if (ret) {
|
||||
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
|
||||
|
@ -1316,7 +1310,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (IS_BROADWELL(dev)) {
|
||||
if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
|
||||
dev_priv->display.setup_backlight = bdw_setup_backlight;
|
||||
dev_priv->display.enable_backlight = bdw_enable_backlight;
|
||||
dev_priv->display.disable_backlight = pch_disable_backlight;
|
||||
|
|
File diff suppressed because it is too large
|
@ -729,8 +729,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
|
|||
* workaround for a possible hang in the unlikely event a TLB
|
||||
* invalidation occurs during a PSD flush.
|
||||
*/
|
||||
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
|
||||
intel_ring_emit_wa(ring, HDC_CHICKEN0,
|
||||
_MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
|
||||
_MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT |
|
||||
(IS_BDW_GT3(dev) ?
|
||||
HDC_FENCE_DEST_SLM_DISABLE : 0)
|
||||
));
|
||||
|
||||
/* Wa4x4STCOptimizationDisable:bdw */
|
||||
intel_ring_emit_wa(ring, CACHE_MODE_1,
|
||||
|
@ -812,7 +816,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
|
|||
*
|
||||
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen >= 6)
|
||||
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
|
||||
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
|
||||
|
||||
/* Required for the hardware to program scanline values for waiting */
|
||||
|
@ -1186,7 +1190,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
|
@ -1217,7 +1221,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
if (!intel_irqs_enabled(dev_priv))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
|
@ -1254,7 +1258,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
if (!intel_irqs_enabled(dev_priv))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
|
@ -1388,8 +1392,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
return false;
|
||||
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
if (ring->irq_refcount++ == 0) {
|
||||
|
@ -1431,7 +1435,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
|
@ -1451,9 +1455,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
if (--ring->irq_refcount == 0) {
|
||||
I915_WRITE_IMR(ring, ~0);
|
||||
|
@ -1469,7 +1470,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
|
@ -2229,6 +2230,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
|
|||
u32 invalidate, u32 flush)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t cmd;
|
||||
int ret;
|
||||
|
||||
|
@ -2259,8 +2261,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
|
|||
}
|
||||
intel_ring_advance(ring);
|
||||
|
||||
if (IS_GEN7(dev) && !invalidate && flush)
|
||||
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
|
||||
if (!invalidate && flush) {
|
||||
if (IS_GEN7(dev))
|
||||
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
|
||||
else if (IS_BROADWELL(dev))
|
||||
dev_priv->fbc.need_sw_cache_clean = true;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -138,6 +138,184 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
|
|||
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
|
||||
}
|
||||
|
||||
static void
|
||||
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
uint32_t x, uint32_t y,
|
||||
uint32_t src_w, uint32_t src_h)
|
||||
{
|
||||
struct drm_device *dev = drm_plane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane + 1;
|
||||
u32 plane_ctl, stride;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
|
||||
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
|
||||
|
||||
/* Mask out pixel format bits in case we change it */
|
||||
plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
|
||||
plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
|
||||
plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
|
||||
plane_ctl &= ~PLANE_CTL_TILED_MASK;
|
||||
plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
|
||||
|
||||
/* Trickle feed has to be enabled */
|
||||
plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
|
||||
|
||||
switch (fb->pixel_format) {
|
||||
case DRM_FORMAT_RGB565:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
|
||||
break;
|
||||
case DRM_FORMAT_XBGR8888:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
|
||||
break;
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
|
||||
break;
|
||||
/*
|
||||
* XXX: For ARBG/ABGR formats we default to expecting scanout buffers
|
||||
* to be already pre-multiplied. We need to add a knob (or a different
|
||||
* DRM_FORMAT) for user-space to configure that.
|
||||
*/
|
||||
case DRM_FORMAT_ABGR8888:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
|
||||
PLANE_CTL_ORDER_RGBX |
|
||||
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
|
||||
break;
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
|
||||
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
|
||||
break;
|
||||
case DRM_FORMAT_YUYV:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
|
||||
break;
|
||||
case DRM_FORMAT_YVYU:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
|
||||
break;
|
||||
case DRM_FORMAT_UYVY:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
|
||||
break;
|
||||
case DRM_FORMAT_VYUY:
|
||||
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
switch (obj->tiling_mode) {
|
||||
case I915_TILING_NONE:
|
||||
stride = fb->pitches[0] >> 6;
|
||||
break;
|
||||
case I915_TILING_X:
|
||||
plane_ctl |= PLANE_CTL_TILED_X;
|
||||
stride = fb->pitches[0] >> 9;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
plane_ctl |= PLANE_CTL_ENABLE;
|
||||
plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
|
||||
|
||||
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
|
||||
pixel_size, true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
/* Sizes are 0 based */
|
||||
src_w--;
|
||||
src_h--;
|
||||
crtc_w--;
|
||||
crtc_h--;
|
||||
|
||||
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
|
||||
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
|
||||
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
|
||||
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
|
||||
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
|
||||
I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
|
||||
POSTING_READ(PLANE_SURF(pipe, plane));
|
||||
}
|
||||
|
||||
static void
|
||||
skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = drm_plane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane + 1;
|
||||
|
||||
I915_WRITE(PLANE_CTL(pipe, plane),
|
||||
I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
|
||||
|
||||
/* Activate double buffered register update */
|
||||
I915_WRITE(PLANE_CTL(pipe, plane), 0);
|
||||
POSTING_READ(PLANE_CTL(pipe, plane));
|
||||
|
||||
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
|
||||
}
|
||||
|
||||
static int
|
||||
skl_update_colorkey(struct drm_plane *drm_plane,
|
||||
struct drm_intel_sprite_colorkey *key)
|
||||
{
|
||||
struct drm_device *dev = drm_plane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane;
|
||||
u32 plane_ctl;
|
||||
|
||||
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
|
||||
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
|
||||
I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
|
||||
|
||||
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
|
||||
plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
|
||||
if (key->flags & I915_SET_COLORKEY_DESTINATION)
|
||||
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
|
||||
else if (key->flags & I915_SET_COLORKEY_SOURCE)
|
||||
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
|
||||
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
|
||||
|
||||
POSTING_READ(PLANE_CTL(pipe, plane));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
skl_get_colorkey(struct drm_plane *drm_plane,
|
||||
struct drm_intel_sprite_colorkey *key)
|
||||
{
|
||||
struct drm_device *dev = drm_plane->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane;
|
||||
u32 plane_ctl;
|
||||
|
||||
key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
|
||||
key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
|
||||
key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
|
||||
|
||||
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
|
||||
|
||||
switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
|
||||
case PLANE_CTL_KEY_ENABLE_DESTINATION:
|
||||
key->flags = I915_SET_COLORKEY_DESTINATION;
|
||||
break;
|
||||
case PLANE_CTL_KEY_ENABLE_SOURCE:
|
||||
key->flags = I915_SET_COLORKEY_SOURCE;
|
||||
break;
|
||||
default:
|
||||
key->flags = I915_SET_COLORKEY_NONE;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
|
@ -845,57 +1023,24 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
|
|||
}
|
||||
|
||||
static int
|
||||
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
uint32_t src_x, uint32_t src_y,
|
||||
uint32_t src_w, uint32_t src_h)
|
||||
intel_check_sprite_plane(struct drm_plane *plane,
|
||||
struct intel_plane_state *state)
|
||||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct drm_framebuffer *fb = state->fb;
|
||||
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
|
||||
struct drm_i915_gem_object *obj = intel_fb->obj;
|
||||
struct drm_i915_gem_object *old_obj = intel_plane->obj;
|
||||
int ret;
|
||||
bool primary_enabled;
|
||||
bool visible;
|
||||
int crtc_x, crtc_y;
|
||||
unsigned int crtc_w, crtc_h;
|
||||
uint32_t src_x, src_y, src_w, src_h;
|
||||
struct drm_rect *src = &state->src;
|
||||
struct drm_rect *dst = &state->dst;
|
||||
struct drm_rect *orig_src = &state->orig_src;
|
||||
const struct drm_rect *clip = &state->clip;
|
||||
int hscale, vscale;
|
||||
int max_scale, min_scale;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
struct drm_rect src = {
|
||||
/* sample coordinates in 16.16 fixed point */
|
||||
.x1 = src_x,
|
||||
.x2 = src_x + src_w,
|
||||
.y1 = src_y,
|
||||
.y2 = src_y + src_h,
|
||||
};
|
||||
struct drm_rect dst = {
|
||||
/* integer pixels */
|
||||
.x1 = crtc_x,
|
||||
.x2 = crtc_x + crtc_w,
|
||||
.y1 = crtc_y,
|
||||
.y2 = crtc_y + crtc_h,
|
||||
};
|
||||
const struct drm_rect clip = {
|
||||
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
|
||||
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
|
||||
};
|
||||
const struct {
|
||||
int crtc_x, crtc_y;
|
||||
unsigned int crtc_w, crtc_h;
|
||||
uint32_t src_x, src_y, src_w, src_h;
|
||||
} orig = {
|
||||
.crtc_x = crtc_x,
|
||||
.crtc_y = crtc_y,
|
||||
.crtc_w = crtc_w,
|
||||
.crtc_h = crtc_h,
|
||||
.src_x = src_x,
|
||||
.src_y = src_y,
|
||||
.src_w = src_w,
|
||||
.src_h = src_h,
|
||||
};
|
||||
|
||||
/* Don't modify another pipe's plane */
|
||||
if (intel_plane->pipe != intel_crtc->pipe) {
|
||||
|
@ -927,55 +1072,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
max_scale = intel_plane->max_downscale << 16;
|
||||
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
|
||||
|
||||
drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
|
||||
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
|
||||
intel_plane->rotation);
|
||||
|
||||
hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
|
||||
hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
|
||||
BUG_ON(hscale < 0);
|
||||
|
||||
vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
|
||||
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
|
||||
BUG_ON(vscale < 0);
|
||||
|
||||
visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
|
||||
state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
|
||||
|
||||
crtc_x = dst.x1;
|
||||
crtc_y = dst.y1;
|
||||
crtc_w = drm_rect_width(&dst);
|
||||
crtc_h = drm_rect_height(&dst);
|
||||
crtc_x = dst->x1;
|
||||
crtc_y = dst->y1;
|
||||
crtc_w = drm_rect_width(dst);
|
||||
crtc_h = drm_rect_height(dst);
|
||||
|
||||
if (visible) {
|
||||
if (state->visible) {
|
||||
/* check again in case clipping clamped the results */
|
||||
hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
|
||||
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
|
||||
if (hscale < 0) {
|
||||
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
|
||||
drm_rect_debug_print(&src, true);
|
||||
drm_rect_debug_print(&dst, false);
|
||||
drm_rect_debug_print(src, true);
|
||||
drm_rect_debug_print(dst, false);
|
||||
|
||||
return hscale;
|
||||
}
|
||||
|
||||
vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
|
||||
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
|
||||
if (vscale < 0) {
|
||||
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
|
||||
drm_rect_debug_print(&src, true);
|
||||
drm_rect_debug_print(&dst, false);
|
||||
drm_rect_debug_print(src, true);
|
||||
drm_rect_debug_print(dst, false);
|
||||
|
||||
return vscale;
|
||||
}
|
||||
|
||||
/* Make the source viewport size an exact multiple of the scaling factors. */
|
||||
drm_rect_adjust_size(&src,
|
||||
drm_rect_width(&dst) * hscale - drm_rect_width(&src),
|
||||
drm_rect_height(&dst) * vscale - drm_rect_height(&src));
|
||||
drm_rect_adjust_size(src,
|
||||
drm_rect_width(dst) * hscale - drm_rect_width(src),
|
||||
drm_rect_height(dst) * vscale - drm_rect_height(src));
|
||||
|
||||
drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
|
||||
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
|
||||
intel_plane->rotation);
|
||||
|
||||
/* sanity check to make sure the src viewport wasn't enlarged */
|
||||
WARN_ON(src.x1 < (int) src_x ||
|
||||
src.y1 < (int) src_y ||
|
||||
src.x2 > (int) (src_x + src_w) ||
|
||||
src.y2 > (int) (src_y + src_h));
|
||||
WARN_ON(src->x1 < (int) orig_src->x1 ||
|
||||
src->y1 < (int) orig_src->y1 ||
|
||||
src->x2 > (int) orig_src->x2 ||
|
||||
src->y2 > (int) orig_src->y2);
|
||||
|
||||
/*
|
||||
* Hardware doesn't handle subpixel coordinates.
|
||||
|
@ -983,10 +1128,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
* increase the source viewport size, because that could
|
||||
* push the downscaling factor out of bounds.
|
||||
*/
|
||||
src_x = src.x1 >> 16;
|
||||
src_w = drm_rect_width(&src) >> 16;
|
||||
src_y = src.y1 >> 16;
|
||||
src_h = drm_rect_height(&src) >> 16;
|
||||
src_x = src->x1 >> 16;
|
||||
src_w = drm_rect_width(src) >> 16;
|
||||
src_y = src->y1 >> 16;
|
||||
src_h = drm_rect_height(src) >> 16;
|
||||
|
||||
if (format_is_yuv(fb->pixel_format)) {
|
||||
src_x &= ~1;
|
||||
|
@ -1000,12 +1145,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
crtc_w &= ~1;
|
||||
|
||||
if (crtc_w == 0)
|
||||
visible = false;
|
||||
state->visible = false;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check size restrictions when scaling */
|
||||
if (visible && (src_w != crtc_w || src_h != crtc_h)) {
|
||||
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
|
||||
unsigned int width_bytes;
|
||||
|
||||
WARN_ON(!intel_plane->can_scale);
|
||||
|
@ -1013,12 +1158,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
/* FIXME interlacing min height is 6 */
|
||||
|
||||
if (crtc_w < 3 || crtc_h < 3)
|
||||
visible = false;
|
||||
state->visible = false;
|
||||
|
||||
if (src_w < 3 || src_h < 3)
|
||||
visible = false;
|
||||
state->visible = false;
|
||||
|
||||
width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
|
||||
width_bytes = ((src_x * pixel_size) & 63) +
|
||||
src_w * pixel_size;
|
||||
|
||||
if (src_w > 2048 || src_h > 2048 ||
|
||||
width_bytes > 4096 || fb->pitches[0] > 4096) {
|
||||
|
@ -1027,42 +1173,76 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
}
|
||||
}
|
||||
|
||||
dst.x1 = crtc_x;
|
||||
dst.x2 = crtc_x + crtc_w;
|
||||
dst.y1 = crtc_y;
|
||||
dst.y2 = crtc_y + crtc_h;
|
||||
if (state->visible) {
|
||||
src->x1 = src_x;
|
||||
src->x2 = src_x + src_w;
|
||||
src->y1 = src_y;
|
||||
src->y2 = src_y + src_h;
|
||||
}
|
||||
|
||||
dst->x1 = crtc_x;
|
||||
dst->x2 = crtc_x + crtc_w;
|
||||
dst->y1 = crtc_y;
|
||||
dst->y2 = crtc_y + crtc_h;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_commit_sprite_plane(struct drm_plane *plane,
|
||||
struct intel_plane_state *state)
|
||||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct drm_crtc *crtc = state->crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_plane *intel_plane = to_intel_plane(plane);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct drm_framebuffer *fb = state->fb;
|
||||
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
|
||||
struct drm_i915_gem_object *obj = intel_fb->obj;
|
||||
struct drm_i915_gem_object *old_obj = intel_plane->obj;
|
||||
int crtc_x, crtc_y;
|
||||
unsigned int crtc_w, crtc_h;
|
||||
uint32_t src_x, src_y, src_w, src_h;
|
||||
struct drm_rect *dst = &state->dst;
|
||||
const struct drm_rect *clip = &state->clip;
|
||||
bool primary_enabled;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* If the sprite is completely covering the primary plane,
|
||||
* we can disable the primary and save power.
|
||||
*/
|
||||
primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
|
||||
WARN_ON(!primary_enabled && !visible && intel_crtc->active);
|
||||
primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
|
||||
WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
/* Note that this will apply the VT-d workaround for scanouts,
|
||||
* which is more restrictive than required for sprites. (The
|
||||
* primary plane requires 256KiB alignment with 64 PTE padding,
|
||||
* the sprite planes only require 128KiB alignment and 32 PTE padding.
|
||||
*/
|
||||
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
|
||||
if (old_obj != obj) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
i915_gem_track_fb(old_obj, obj,
|
||||
INTEL_FRONTBUFFER_SPRITE(pipe));
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
/* Note that this will apply the VT-d workaround for scanouts,
|
||||
* which is more restrictive than required for sprites. (The
|
||||
* primary plane requires 256KiB alignment with 64 PTE padding,
|
||||
* the sprite planes only require 128KiB alignment and 32 PTE
|
||||
* padding.
|
||||
*/
|
||||
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
|
||||
if (ret == 0)
|
||||
i915_gem_track_fb(old_obj, obj,
|
||||
INTEL_FRONTBUFFER_SPRITE(pipe));
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_plane->crtc_x = orig.crtc_x;
|
||||
intel_plane->crtc_y = orig.crtc_y;
|
||||
intel_plane->crtc_w = orig.crtc_w;
|
||||
intel_plane->crtc_h = orig.crtc_h;
|
||||
intel_plane->src_x = orig.src_x;
|
||||
intel_plane->src_y = orig.src_y;
|
||||
intel_plane->src_w = orig.src_w;
|
||||
intel_plane->src_h = orig.src_h;
|
||||
intel_plane->crtc_x = state->orig_dst.x1;
|
||||
intel_plane->crtc_y = state->orig_dst.y1;
|
||||
intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
|
||||
intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
|
||||
intel_plane->src_x = state->orig_src.x1;
|
||||
intel_plane->src_y = state->orig_src.y1;
|
||||
intel_plane->src_w = drm_rect_width(&state->orig_src);
|
||||
intel_plane->src_h = drm_rect_height(&state->orig_src);
|
||||
intel_plane->obj = obj;
|
||||
|
||||
if (intel_crtc->active) {
|
||||
|
@ -1076,12 +1256,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
if (primary_was_enabled && !primary_enabled)
|
||||
intel_pre_disable_primary(crtc);
|
||||
|
||||
if (visible)
|
||||
if (state->visible) {
|
||||
crtc_x = state->dst.x1;
|
||||
crtc_y = state->dst.y1;
|
||||
crtc_w = drm_rect_width(&state->dst);
|
||||
crtc_h = drm_rect_height(&state->dst);
|
||||
src_x = state->src.x1;
|
||||
src_y = state->src.y1;
|
||||
src_w = drm_rect_width(&state->src);
|
||||
src_h = drm_rect_height(&state->src);
|
||||
intel_plane->update_plane(plane, crtc, fb, obj,
|
||||
crtc_x, crtc_y, crtc_w, crtc_h,
|
||||
src_x, src_y, src_w, src_h);
|
||||
else
|
||||
} else {
|
||||
intel_plane->disable_plane(plane, crtc);
|
||||
}
|
||||
|
||||
|
||||
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
|
||||
|
||||
|
@ -1090,14 +1280,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
/* Unpin old obj after new one is active to avoid ugliness */
|
||||
if (old_obj) {
|
||||
if (old_obj && old_obj != obj) {
|
||||
|
||||
/*
|
||||
* It's fairly common to simply update the position of
|
||||
* an existing object. In that case, we don't need to
|
||||
* wait for vblank to avoid ugliness, we only need to
|
||||
* do the pin & ref bookkeeping.
|
||||
*/
|
||||
if (old_obj != obj && intel_crtc->active)
|
||||
if (intel_crtc->active)
|
||||
intel_wait_for_vblank(dev, intel_crtc->pipe);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
@ -1108,6 +1299,46 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
uint32_t src_x, uint32_t src_y,
|
||||
uint32_t src_w, uint32_t src_h)
|
||||
{
|
||||
struct intel_plane_state state;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int ret;
|
||||
|
||||
state.crtc = crtc;
|
||||
state.fb = fb;
|
||||
|
||||
/* sample coordinates in 16.16 fixed point */
|
||||
state.src.x1 = src_x;
|
||||
state.src.x2 = src_x + src_w;
|
||||
state.src.y1 = src_y;
|
||||
state.src.y2 = src_y + src_h;
|
||||
|
||||
/* integer pixels */
|
||||
state.dst.x1 = crtc_x;
|
||||
state.dst.x2 = crtc_x + crtc_w;
|
||||
state.dst.y1 = crtc_y;
|
||||
state.dst.y2 = crtc_y + crtc_h;
|
||||
|
||||
state.clip.x1 = 0;
|
||||
state.clip.y1 = 0;
|
||||
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
|
||||
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
|
||||
state.orig_src = state.src;
|
||||
state.orig_dst = state.dst;
|
||||
|
||||
ret = intel_check_sprite_plane(plane, &state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return intel_commit_sprite_plane(plane, &state);
|
||||
}
|
||||
|
||||
static int
|
||||
intel_disable_plane(struct drm_plane *plane)
|
||||
{
|
||||
|
@ -1305,6 +1536,18 @@ static uint32_t vlv_plane_formats[] = {
|
|||
DRM_FORMAT_VYUY,
|
||||
};
|
||||
|
||||
static uint32_t skl_plane_formats[] = {
|
||||
DRM_FORMAT_RGB565,
|
||||
DRM_FORMAT_ABGR8888,
|
||||
DRM_FORMAT_ARGB8888,
|
||||
DRM_FORMAT_XBGR8888,
|
||||
DRM_FORMAT_XRGB8888,
|
||||
DRM_FORMAT_YUYV,
|
||||
DRM_FORMAT_YVYU,
|
||||
DRM_FORMAT_UYVY,
|
||||
DRM_FORMAT_VYUY,
|
||||
};
|
||||
|
||||
int
|
||||
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
|
||||
{
|
||||
|
@ -1368,7 +1611,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
|
|||
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
|
||||
}
|
||||
break;
|
||||
case 9:
|
||||
/*
|
||||
* FIXME: Skylake planes can be scaled (with some restrictions),
|
||||
* but this is for another time.
|
||||
*/
|
||||
intel_plane->can_scale = false;
|
||||
intel_plane->max_downscale = 1;
|
||||
intel_plane->update_plane = skl_update_plane;
|
||||
intel_plane->disable_plane = skl_disable_plane;
|
||||
intel_plane->update_colorkey = skl_update_colorkey;
|
||||
intel_plane->get_colorkey = skl_get_colorkey;
|
||||
|
||||
plane_formats = skl_plane_formats;
|
||||
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
|
||||
break;
|
||||
default:
|
||||
kfree(intel_plane);
|
||||
return -ENODEV;
|
||||
|
|
|
@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
|
|||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
u32 tv_ctl, save_tv_ctl;
|
||||
u32 tv_dac, save_tv_dac;
|
||||
int type;
|
||||
|
||||
/* Disable TV interrupts around load detect or we'll recurse */
|
||||
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
i915_disable_pipestat(dev_priv, 0,
|
||||
PIPE_HOTPLUG_INTERRUPT_STATUS |
|
||||
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
save_tv_dac = tv_dac = I915_READ(TV_DAC);
|
||||
|
@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
|
|||
|
||||
/* Restore interrupt config */
|
||||
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
i915_enable_pipestat(dev_priv, 0,
|
||||
PIPE_HOTPLUG_INTERRUPT_STATUS |
|
||||
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
return type;
|
||||
|
|
|
@ -194,13 +194,15 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
|
|||
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
|
||||
int fw_engine)
|
||||
{
|
||||
/*
|
||||
* WaRsDontPollForAckOnClearingFWBits:vlv
|
||||
* Hardware clears ack bits lazily (only when all ack
|
||||
* bits become 0) so don't poll for individual ack
|
||||
* bits to be clear here like on other platforms.
|
||||
*/
|
||||
|
||||
/* Check for Render Engine */
|
||||
if (FORCEWAKE_RENDER & fw_engine) {
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_VLV) &
|
||||
FORCEWAKE_KERNEL) == 0,
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
|
||||
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
|
||||
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
|
||||
|
@ -214,11 +216,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
|
|||
|
||||
/* Check for Media Engine */
|
||||
if (FORCEWAKE_MEDIA & fw_engine) {
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_MEDIA_VLV) &
|
||||
FORCEWAKE_KERNEL) == 0,
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
|
||||
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
|
||||
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
|
||||
|
@ -968,7 +965,7 @@ static const struct register_whitelist {
|
|||
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
|
||||
uint32_t gen_bitmask;
|
||||
} whitelist[] = {
|
||||
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
|
||||
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
|
||||
};
|
||||
|
||||
int i915_reg_read_ioctl(struct drm_device *dev,
|
||||
|
|
|
@ -303,7 +303,8 @@
|
|||
#define DP_TEST_CRC_B_CB 0x244
|
||||
|
||||
#define DP_TEST_SINK_MISC 0x246
|
||||
#define DP_TEST_CRC_SUPPORTED (1 << 5)
|
||||
# define DP_TEST_CRC_SUPPORTED (1 << 5)
|
||||
# define DP_TEST_COUNT_MASK 0x7
|
||||
|
||||
#define DP_TEST_RESPONSE 0x260
|
||||
# define DP_TEST_ACK (1 << 0)
|
||||
|
@ -313,7 +314,7 @@
|
|||
#define DP_TEST_EDID_CHECKSUM 0x261
|
||||
|
||||
#define DP_TEST_SINK 0x270
|
||||
#define DP_TEST_SINK_START (1 << 0)
|
||||
# define DP_TEST_SINK_START (1 << 0)
|
||||
|
||||
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
|
||||
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
|
||||
|
|
|
@ -259,4 +259,21 @@
|
|||
INTEL_VGA_DEVICE(0x22b2, info), \
|
||||
INTEL_VGA_DEVICE(0x22b3, info)
|
||||
|
||||
#define INTEL_SKL_IDS(info) \
|
||||
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
|
||||
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
|
||||
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
|
||||
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
|
||||
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
|
||||
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
|
||||
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
|
||||
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
|
||||
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
|
||||
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
|
||||
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
|
||||
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
|
||||
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
|
||||
INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
|
||||
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
|
||||
|
||||
#endif /* _I915_PCIIDS_H */
|
||||
|
|