Merge branch 'drm-intel-next' of git://people.freedesktop.org/~keithp/linux into drm-core-next
* 'drm-intel-next' of git://people.freedesktop.org/~keithp/linux: (72 commits)
  drm/i915/dp: Fix eDP on PCH DP on CPT/PPT
  drm/i915/dp: Introduce is_cpu_edp()
  drm/i915: use correct SPD type value
  drm/i915: fix ILK+ infoframe support
  drm/i915: add DP test request handling
  drm/i915: read full receiver capability field during DP hot plug
  drm/i915/dp: Remove eDP special cases from bandwidth checks
  drm/i915/dp: Fix the math in intel_dp_link_required
  drm/i915/panel: Always record the backlight level again (but cleverly)
  i915: Move i915_read/write out of line
  drm/i915: remove transcoder PLL mashing from mode_set per specs
  drm/i915: if transcoder disable fails, say which
  drm/i915: set watermarks for third pipe on IVB
  drm/i915: export a CPT mode set verification function
  drm/i915: fix transcoder PLL select masking
  drm/i915: fix IVB cursor support
  drm/i915: fix debug output for 3 pipe configs
  drm/i915: add PLL sharing support to handle 3 pipes
  drm/i915: fix PCH PLL assertion check for 3 pipes
  drm/i915: use transcoder select bits on VGA and HDMI on CPT
  ...
commit 9b553f7286
@@ -923,6 +923,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;

if (intel_private.base.do_idle_maps)
return -ENODEV;

if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries;

@@ -985,6 +988,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;

if (intel_private.base.do_idle_maps)
return -ENODEV;

intel_gtt_clear_range(pg_start, mem->page_count);

if (intel_private.base.needs_dmar) {

@@ -1177,6 +1183,25 @@ static void gen6_cleanup(void)
{
}

/* Certain Gen5 chipsets require require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
extern int intel_iommu_gfx_mapped;
static inline int needs_idle_maps(void)
{
const unsigned short gpu_devid = intel_private.pcidev->device;

/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;

return 0;
}

static int i9xx_setup(void)
{
u32 reg_addr;

@@ -1211,6 +1236,9 @@ static int i9xx_setup(void)
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}

if (needs_idle_maps());
intel_private.base.do_idle_maps = 1;

intel_i9xx_setup_flush();

return 0;

@@ -2035,7 +2035,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);

if (IS_MOBILE(dev) || !IS_GEN2(dev))
if (IS_IVYBRIDGE(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;

@@ -79,11 +79,11 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");

unsigned int i915_panel_use_ssc __read_mostly = 1;
unsigned int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: true)");
"(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);

@@ -471,6 +471,9 @@ static int i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);

if (HAS_PCH_SPLIT(dev))
ironlake_init_pch_refclk(dev);

drm_mode_config_reset(dev);
drm_irq_install(dev);

@@ -895,3 +898,43 @@ module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))

#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
gen6_gt_force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
gen6_gt_force_wake_put(dev_priv); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write

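The two helper macros above stamp out one MMIO accessor per register width. As a reading aid only, here is a rough hand-expansion of __i915_read(32, l), assuming read##y/write##y paste to the kernel's readl()/writel(); it sketches what the preprocessor emits and is not code taken from the patch itself.

/* Approximate expansion of __i915_read(32, l) from the hunk above. */
u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		/* Registers below 0x40000 on gen6+ need the GT kept awake. */
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}

Moving these bodies out of line (the header hunks below drop the old static inline versions in favour of plain declarations) keeps the force-wake bookkeeping in a single translation unit.
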
@@ -139,7 +139,6 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 i2c_speed;
u8 ddc_pin;
};

@@ -349,7 +348,6 @@ typedef struct drm_i915_private {
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

@@ -359,6 +357,7 @@ typedef struct drm_i915_private {
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
struct {
int rate;

@@ -674,10 +673,9 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
int panel_t3, panel_t12;

struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;

@@ -1303,6 +1301,7 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);

@@ -1356,18 +1355,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
((reg) != FORCEWAKE))

#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
gen6_gt_force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
gen6_gt_force_wake_put(dev_priv); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, b)
__i915_read(16, w)

@@ -1376,13 +1364,8 @@ __i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
}
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)

@@ -195,6 +195,8 @@ i915_gem_create(struct drm_file *file,
u32 handle;

size = roundup(size, PAGE_SIZE);
if (size == 0)
return -EINVAL;

/* Allocate the new object */
obj = i915_gem_alloc_object(dev, size);

@@ -800,11 +802,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if (IS_ERR(page))
return PTR_ERR(page);

vaddr = kmap_atomic(page, KM_USER0);
vaddr = kmap_atomic(page);
ret = __copy_from_user_inatomic(vaddr + page_offset,
user_data,
page_length);
kunmap_atomic(vaddr, KM_USER0);
kunmap_atomic(vaddr);

set_page_dirty(page);
mark_page_accessed(page);

@@ -1476,7 +1478,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
obj->pages[i] = page;
}

if (obj->tiling_mode != I915_TILING_NONE)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);

return 0;

@@ -1498,7 +1500,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)

BUG_ON(obj->madv == __I915_MADV_PURGED);

if (obj->tiling_mode != I915_TILING_NONE)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);

if (obj->madv == I915_MADV_DONTNEED)

@@ -2191,14 +2193,8 @@ int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
int ret, i;

lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return 0;

/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
ret = i915_ring_idle(&dev_priv->ring[i]);

@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;

backing_map = kmap_atomic(obj->pages[page], KM_USER0);
backing_map = kmap_atomic(obj->pages[page]);

if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");

@@ -181,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
}
}
}
kunmap_atomic(backing_map, KM_USER0);
kunmap_atomic(backing_map);
backing_map = NULL;
}

out:
if (backing_map != NULL)
kunmap_atomic(backing_map, KM_USER0);
kunmap_atomic(backing_map);
iounmap(gtt_mapping);

/* give syslog time to catch up */

@@ -49,6 +49,28 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev,
}
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;

if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
}

return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
if (unlikely(dev_priv->mm.gtt->do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -117,6 +139,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;

interruptible = do_idling(dev_priv);

intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);

@@ -124,4 +152,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
}

undo_idling(dev_priv, interruptible);
}

@@ -92,7 +92,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

if (INTEL_INFO(dev)->gen >= 5) {
if (INTEL_INFO(dev)->gen >= 6) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/

@@ -440,14 +443,9 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;

if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;

if (obj->bit_17 == NULL)
return;

@@ -464,14 +462,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;

if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;

if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);

@@ -383,6 +383,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir = dev_priv->pm_iir;
dev_priv->pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);

if (!pm_iir)

@@ -420,7 +421,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
* an *extremely* unlikely race with gen6_rps_enable() that is prevented
* by holding struct_mutex for the duration of the write.
*/
I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
mutex_unlock(&dev_priv->dev->struct_mutex);
}

@@ -536,8 +536,9 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}

@@ -649,8 +650,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
I915_WRITE(GEN6_PMIMR, pm_iir);
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps_work);
}

@@ -1777,6 +1779,26 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}

/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
*
* This register is the same on all known PCH chips.
*/

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;

hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

@@ -1839,6 +1861,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);

ironlake_enable_pch_hotplug(dev);

if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);

@@ -1896,6 +1920,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);

ironlake_enable_pch_hotplug(dev);

return 0;
}

@@ -2020,6 +2046,10 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));

I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i915_driver_irq_uninstall(struct drm_device * dev)

@@ -242,16 +242,22 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WC_FLUSH (1<<12)
#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
#define PIPE_CONTROL_ISP_DIS (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */

/*

@@ -2429,6 +2435,7 @@
#define WM0_PIPE_CURSOR_MASK (0x1f)

#define WM0_PIPEB_ILK 0x45104
#define WM0_PIPEC_IVB 0x45200
#define WM1_LP_ILK 0x45108
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24

@@ -2567,10 +2574,18 @@
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8

#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088

#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)

#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)

/* Display A control */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)

@@ -2916,12 +2931,13 @@
#define SDEIER 0xc400c

/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)

@@ -2930,6 +2946,7 @@
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)

@@ -2938,6 +2955,7 @@
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)

@@ -2958,15 +2976,15 @@

#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)

#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)

#define PCH_DPLL_TEST 0xc606c

@@ -3180,6 +3198,7 @@
#define FDI_LINK_TRAIN_NONE_IVB (3<<8)

/* both Tx and Rx */
#define FDI_COMPOSITE_SYNC (1<<11)
#define FDI_LINK_TRAIN_AUTO (1<<10)
#define FDI_SCRAMBLING_ENABLE (0<<7)
#define FDI_SCRAMBLING_DISABLE (1<<7)

@@ -3321,15 +3340,35 @@
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define PCH_PP_ON_DELAYS 0xc7208
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
#define EDP_PANEL (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_UP_DELAY_SHIFT 16
#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
#define PANEL_LIGHT_ON_DELAY_SHIFT 0

#define PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
#define PANEL_LIGHT_OFF_DELAY_SHIFT 0

#define PCH_PP_DIVISOR 0xc7210
#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
#define PP_REFERENCE_DIVIDER_SHIFT 8
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0

#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110

@@ -1,5 +1,5 @@
/*
* Copyright © 2006 Intel Corporation
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),

@@ -309,6 +309,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
dev_priv->display_clock_mode);
}
}

@@ -396,15 +403,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
p_mapping->slave_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
p_mapping->i2c_pin,
p_mapping->i2c_speed);
p_mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");

@@ -610,7 +615,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);

/* eDP data */
dev_priv->edp.bpp = 18;

@@ -639,7 +644,7 @@ intel_parse_bios(struct drm_device *dev)
if (dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
vbt->signature);
bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
} else

@@ -1,5 +1,5 @@
/*
* Copyright © 2006 Intel Corporation
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),

@@ -120,7 +120,9 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
u8 rsvd8:3; /* finish byte */
u8 rsvd7:1;
u8 display_clock_mode:1;
u8 rsvd8:1; /* finish byte */

/* bits 3 */
u8 disable_smooth_vision:1;

@@ -133,7 +135,10 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 rsvd11:6; /* finish byte */
u8 int_efp_support:1;
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
} __attribute__((packed));

/* pre-915 */

@@ -197,8 +202,7 @@ struct bdb_general_features {
struct child_device_config {
u16 handle;
u16 device_type;
u8 i2c_speed;
u8 rsvd[9];
u8 device_id[10]; /* ascii string */
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;

@@ -446,11 +450,11 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3

struct edp_power_seq {
u16 t3;
u16 t7;
u16 t1_t3;
u16 t8;
u16 t9;
u16 t10;
u16 t12;
u16 t11_t12;
} __attribute__ ((packed));

struct edp_link_params {

@ -152,17 +152,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
|
|||
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
|
||||
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
|
||||
|
||||
if (intel_crtc->pipe == 0) {
|
||||
if (HAS_PCH_CPT(dev))
|
||||
adpa |= PORT_TRANS_A_SEL_CPT;
|
||||
else
|
||||
adpa |= ADPA_PIPE_A_SELECT;
|
||||
} else {
|
||||
if (HAS_PCH_CPT(dev))
|
||||
adpa |= PORT_TRANS_B_SEL_CPT;
|
||||
else
|
||||
adpa |= ADPA_PIPE_B_SELECT;
|
||||
}
|
||||
/* For CPT allow 3 pipe config, for others just use A or B */
|
||||
if (HAS_PCH_CPT(dev))
|
||||
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
|
||||
else if (intel_crtc->pipe == 0)
|
||||
adpa |= ADPA_PIPE_A_SELECT;
|
||||
else
|
||||
adpa |= ADPA_PIPE_B_SELECT;
|
||||
|
||||
if (!HAS_PCH_SPLIT(dev))
|
||||
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
|
||||
|
|
|
@ -803,6 +803,19 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
|
|||
u32 val;
|
||||
bool cur_state;
|
||||
|
||||
if (HAS_PCH_CPT(dev_priv->dev)) {
|
||||
u32 pch_dpll;
|
||||
|
||||
pch_dpll = I915_READ(PCH_DPLL_SEL);
|
||||
|
||||
/* Make sure the selected PLL is enabled to the transcoder */
|
||||
WARN(!((pch_dpll >> (4 * pipe)) & 8),
|
||||
"transcoder %d PLL not enabled\n", pipe);
|
||||
|
||||
/* Convert the transcoder pipe number to a pll pipe number */
|
||||
pipe = (pch_dpll >> (4 * pipe)) & 1;
|
||||
}
|
||||
|
||||
reg = PCH_DPLL(pipe);
|
||||
val = I915_READ(reg);
|
||||
cur_state = !!(val & DPLL_VCO_ENABLE);
|
||||
|
@ -1172,6 +1185,9 @@ static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
|
|||
int reg;
|
||||
u32 val;
|
||||
|
||||
if (pipe > 1)
|
||||
return;
|
||||
|
||||
/* PCH only available on ILK+ */
|
||||
BUG_ON(dev_priv->info->gen < 5);
|
||||
|
||||
|
@ -1192,6 +1208,9 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
|
|||
int reg;
|
||||
u32 val;
|
||||
|
||||
if (pipe > 1)
|
||||
return;
|
||||
|
||||
/* PCH only available on ILK+ */
|
||||
BUG_ON(dev_priv->info->gen < 5);
|
||||
|
||||
|
@ -1257,7 +1276,7 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
|
|||
I915_WRITE(reg, val);
|
||||
/* wait for PCH transcoder off, transcoder state */
|
||||
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
|
||||
DRM_ERROR("failed to disable transcoder\n");
|
||||
DRM_ERROR("failed to disable transcoder %d\n", pipe);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2086,6 +2105,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
|
|||
switch (plane) {
|
||||
case 0:
|
||||
case 1:
|
||||
case 2:
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
|
||||
|
@ -2185,6 +2205,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
|
|||
case 0:
|
||||
case 1:
|
||||
break;
|
||||
case 2:
|
||||
if (IS_IVYBRIDGE(dev))
|
||||
break;
|
||||
/* fall through otherwise */
|
||||
default:
|
||||
DRM_ERROR("no plane for crtc\n");
|
||||
return -EINVAL;
|
||||
|
@ -2601,6 +2625,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
|
|||
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
|
||||
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
|
||||
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
|
||||
temp |= FDI_COMPOSITE_SYNC;
|
||||
I915_WRITE(reg, temp | FDI_TX_ENABLE);
|
||||
|
||||
reg = FDI_RX_CTL(pipe);
|
||||
|
@ -2608,6 +2633,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
|
|||
temp &= ~FDI_LINK_TRAIN_AUTO;
|
||||
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
|
||||
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
|
||||
temp |= FDI_COMPOSITE_SYNC;
|
||||
I915_WRITE(reg, temp | FDI_RX_ENABLE);
|
||||
|
||||
POSTING_READ(reg);
|
||||
|
@ -2867,7 +2893,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int pipe = intel_crtc->pipe;
|
||||
u32 reg, temp;
|
||||
u32 reg, temp, transc_sel;
|
||||
|
||||
/* For PCH output, training FDI link */
|
||||
dev_priv->display.fdi_link_train(crtc);
|
||||
|
@ -2875,12 +2901,21 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
|
|||
intel_enable_pch_pll(dev_priv, pipe);
|
||||
|
||||
if (HAS_PCH_CPT(dev)) {
|
||||
transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
|
||||
TRANSC_DPLLB_SEL;
|
||||
|
||||
/* Be sure PCH DPLL SEL is set */
|
||||
temp = I915_READ(PCH_DPLL_SEL);
|
||||
if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
|
||||
if (pipe == 0) {
|
||||
temp &= ~(TRANSA_DPLLB_SEL);
|
||||
temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
|
||||
else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
|
||||
} else if (pipe == 1) {
|
||||
temp &= ~(TRANSB_DPLLB_SEL);
|
||||
temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
|
||||
} else if (pipe == 2) {
|
||||
temp &= ~(TRANSC_DPLLB_SEL);
|
||||
temp |= (TRANSC_DPLL_ENABLE | transc_sel);
|
||||
}
|
||||
I915_WRITE(PCH_DPLL_SEL, temp);
|
||||
}
|
||||
|
||||
|
@ -2936,6 +2971,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
|
|||
intel_enable_transcoder(dev_priv, pipe);
|
||||
}
|
||||
|
||||
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
|
||||
u32 temp;
|
||||
|
||||
temp = I915_READ(dslreg);
|
||||
udelay(500);
|
||||
if (wait_for(I915_READ(dslreg) != temp, 5)) {
|
||||
/* Without this, mode sets may fail silently on FDI */
|
||||
I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
|
||||
udelay(250);
|
||||
I915_WRITE(tc2reg, 0);
|
||||
if (wait_for(I915_READ(dslreg) != temp, 5))
|
||||
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_crtc_enable(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
|
@ -3046,13 +3099,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
|
|||
temp = I915_READ(PCH_DPLL_SEL);
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
|
||||
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
|
||||
break;
|
||||
case 1:
|
||||
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
|
||||
break;
|
||||
case 2:
|
||||
/* FIXME: manage transcoder PLLs? */
|
||||
/* C shares PLL A or B */
|
||||
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
|
||||
break;
|
||||
default:
|
||||
|
@ -3062,7 +3115,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
|
|||
}
|
||||
|
||||
/* disable PCH DPLL */
|
||||
intel_disable_pch_pll(dev_priv, pipe);
|
||||
if (!intel_crtc->no_pll)
|
||||
intel_disable_pch_pll(dev_priv, pipe);
|
||||
|
||||
/* Switch from PCDclk to Rawclk */
|
||||
reg = FDI_RX_CTL(pipe);
|
||||
|
@ -3304,8 +3358,15 @@ void intel_encoder_prepare(struct drm_encoder *encoder)
|
|||
void intel_encoder_commit(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
|
||||
|
||||
/* lvds has its own version of commit see intel_lvds_commit */
|
||||
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
|
||||
|
||||
if (HAS_PCH_CPT(dev))
|
||||
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
|
||||
}
|
||||
|
||||
void intel_encoder_destroy(struct drm_encoder *encoder)
|
||||
|
@ -4479,6 +4540,20 @@ static void sandybridge_update_wm(struct drm_device *dev)
|
|||
enabled |= 2;
|
||||
}
|
||||
|
||||
/* IVB has 3 pipes */
|
||||
if (IS_IVYBRIDGE(dev) &&
|
||||
g4x_compute_wm0(dev, 2,
|
||||
&sandybridge_display_wm_info, latency,
|
||||
&sandybridge_cursor_wm_info, latency,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
I915_WRITE(WM0_PIPEC_IVB,
|
||||
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
|
||||
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
|
||||
" plane %d, cursor: %d\n",
|
||||
plane_wm, cursor_wm);
|
||||
enabled |= 3;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate and update the self-refresh watermark only when one
|
||||
* display plane is used.
|
||||
|
@ -4585,7 +4660,9 @@ static void intel_update_watermarks(struct drm_device *dev)
|
|||
|
||||
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->lvds_use_ssc && i915_panel_use_ssc
|
||||
if (i915_panel_use_ssc >= 0)
|
||||
return i915_panel_use_ssc != 0;
|
||||
return dev_priv->lvds_use_ssc
|
||||
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
|
||||
}
|
||||
|
||||
|
@ -5108,36 +5185,52 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void ironlake_update_pch_refclk(struct drm_device *dev)
|
||||
/*
|
||||
* Initialize reference clocks when the driver loads
|
||||
*/
|
||||
void ironlake_init_pch_refclk(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct drm_crtc *crtc;
|
||||
struct intel_encoder *encoder;
|
||||
struct intel_encoder *has_edp_encoder = NULL;
|
||||
u32 temp;
|
||||
bool has_lvds = false;
|
||||
bool has_cpu_edp = false;
|
||||
bool has_pch_edp = false;
|
||||
bool has_panel = false;
|
||||
bool has_ck505 = false;
|
||||
bool can_ssc = false;
|
||||
|
||||
/* We need to take the global config into account */
|
||||
list_for_each_entry(crtc, &mode_config->crtc_list, head) {
|
||||
if (!crtc->enabled)
|
||||
continue;
|
||||
|
||||
list_for_each_entry(encoder, &mode_config->encoder_list,
|
||||
base.head) {
|
||||
if (encoder->base.crtc != crtc)
|
||||
continue;
|
||||
|
||||
switch (encoder->type) {
|
||||
case INTEL_OUTPUT_LVDS:
|
||||
has_lvds = true;
|
||||
case INTEL_OUTPUT_EDP:
|
||||
has_edp_encoder = encoder;
|
||||
break;
|
||||
}
|
||||
list_for_each_entry(encoder, &mode_config->encoder_list,
|
||||
base.head) {
|
||||
switch (encoder->type) {
|
||||
case INTEL_OUTPUT_LVDS:
|
||||
has_panel = true;
|
||||
has_lvds = true;
|
||||
break;
|
||||
case INTEL_OUTPUT_EDP:
|
||||
has_panel = true;
|
||||
if (intel_encoder_is_pch_edp(&encoder->base))
|
||||
has_pch_edp = true;
|
||||
else
|
||||
has_cpu_edp = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (HAS_PCH_IBX(dev)) {
|
||||
has_ck505 = dev_priv->display_clock_mode;
|
||||
can_ssc = has_ck505;
|
||||
} else {
|
||||
has_ck505 = false;
|
||||
can_ssc = true;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
|
||||
has_panel, has_lvds, has_pch_edp, has_cpu_edp,
|
||||
has_ck505);
|
||||
|
||||
/* Ironlake: try to setup display ref clock before DPLL
|
||||
* enabling. This is only under driver's control after
|
||||
* PCH B stepping, previous chipset stepping should be
|
||||
|
@ -5146,43 +5239,102 @@ static void ironlake_update_pch_refclk(struct drm_device *dev)
|
|||
temp = I915_READ(PCH_DREF_CONTROL);
|
||||
/* Always enable nonspread source */
|
||||
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
|
||||
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
|
||||
temp &= ~DREF_SSC_SOURCE_MASK;
|
||||
temp |= DREF_SSC_SOURCE_ENABLE;
|
||||
I915_WRITE(PCH_DREF_CONTROL, temp);
|
||||
|
||||
POSTING_READ(PCH_DREF_CONTROL);
|
||||
udelay(200);
|
||||
if (has_ck505)
|
||||
temp |= DREF_NONSPREAD_CK505_ENABLE;
|
||||
else
|
||||
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
|
||||
|
||||
if (has_edp_encoder) {
|
||||
if (intel_panel_use_ssc(dev_priv)) {
|
||||
if (has_panel) {
|
||||
temp &= ~DREF_SSC_SOURCE_MASK;
|
||||
temp |= DREF_SSC_SOURCE_ENABLE;
|
||||
|
||||
/* SSC must be turned on before enabling the CPU output */
|
||||
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
|
||||
DRM_DEBUG_KMS("Using SSC on panel\n");
|
||||
temp |= DREF_SSC1_ENABLE;
|
||||
I915_WRITE(PCH_DREF_CONTROL, temp);
|
||||
|
||||
POSTING_READ(PCH_DREF_CONTROL);
|
||||
udelay(200);
|
||||
}
|
||||
|
||||
/* Get SSC going before enabling the outputs */
|
||||
I915_WRITE(PCH_DREF_CONTROL, temp);
|
||||
POSTING_READ(PCH_DREF_CONTROL);
|
||||
udelay(200);
|
||||
|
||||
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
|
||||
|
||||
/* Enable CPU source on CPU attached eDP */
|
||||
if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
|
||||
if (intel_panel_use_ssc(dev_priv))
|
||||
if (has_cpu_edp) {
|
||||
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
|
||||
DRM_DEBUG_KMS("Using SSC on eDP\n");
|
||||
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
|
||||
}
|
||||
else
|
||||
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
|
||||
} else {
|
||||
/* Enable SSC on PCH eDP if needed */
|
||||
if (intel_panel_use_ssc(dev_priv)) {
|
||||
DRM_ERROR("enabling SSC on PCH\n");
|
||||
temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
|
||||
}
|
||||
}
|
||||
} else
|
||||
temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
|
||||
|
||||
I915_WRITE(PCH_DREF_CONTROL, temp);
|
||||
POSTING_READ(PCH_DREF_CONTROL);
|
||||
udelay(200);
|
||||
} else {
|
||||
DRM_DEBUG_KMS("Disabling SSC entirely\n");
|
||||
|
||||
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
|
||||
|
||||
/* Turn off CPU output */
|
||||
temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
|
||||
|
||||
I915_WRITE(PCH_DREF_CONTROL, temp);
|
||||
POSTING_READ(PCH_DREF_CONTROL);
|
||||
udelay(200);
|
||||
|
||||
/* Turn off the SSC source */
|
||||
temp &= ~DREF_SSC_SOURCE_MASK;
|
||||
temp |= DREF_SSC_SOURCE_DISABLE;
|
||||
|
||||
/* Turn off SSC1 */
|
||||
temp &= ~ DREF_SSC1_ENABLE;
|
||||
|
||||
I915_WRITE(PCH_DREF_CONTROL, temp);
|
||||
POSTING_READ(PCH_DREF_CONTROL);
|
||||
udelay(200);
|
||||
}
|
||||
}
|
||||
|
||||
static int ironlake_get_refclk(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_encoder *encoder;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_encoder *edp_encoder = NULL;
|
||||
int num_connectors = 0;
|
||||
bool is_lvds = false;
|
||||
|
||||
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
|
||||
if (encoder->base.crtc != crtc)
|
||||
continue;
|
||||
|
||||
switch (encoder->type) {
|
||||
case INTEL_OUTPUT_LVDS:
|
||||
is_lvds = true;
|
||||
break;
|
||||
case INTEL_OUTPUT_EDP:
|
||||
edp_encoder = encoder;
|
||||
break;
|
||||
}
|
||||
num_connectors++;
|
||||
}
|
||||
|
||||
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
|
||||
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
|
||||
dev_priv->lvds_ssc_freq);
|
||||
return dev_priv->lvds_ssc_freq * 1000;
|
||||
}
|
||||
|
||||
return 120000;
|
||||
}
|
||||
|
||||
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode,
|
||||
|
@ -5242,16 +5394,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
|||
num_connectors++;
|
||||
}
|
||||
|
||||
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
|
||||
refclk = dev_priv->lvds_ssc_freq * 1000;
|
||||
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
|
||||
refclk / 1000);
|
||||
} else {
|
||||
refclk = 96000;
|
||||
if (!has_edp_encoder ||
|
||||
intel_encoder_is_pch_edp(&has_edp_encoder->base))
|
||||
refclk = 120000; /* 120Mhz refclk */
|
||||
}
|
||||
refclk = ironlake_get_refclk(crtc);
|
||||
|
||||
/*
|
||||
* Returns a set of divisors for the desired target clock with the given
|
||||
|
@ -5378,8 +5521,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
|||
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
|
||||
&m_n);
|
||||
|
||||
ironlake_update_pch_refclk(dev);
|
||||
|
||||
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
|
||||
if (has_reduced_clock)
|
||||
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
|
||||
|
@ -5451,39 +5592,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
|||
/* Set up the display plane register */
|
||||
dspcntr = DISPPLANE_GAMMA_ENABLE;
|
||||
|
||||
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
|
||||
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
|
||||
drm_mode_debug_printmodeline(mode);
|
||||
|
||||
/* PCH eDP needs FDI, but CPU eDP does not */
|
||||
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
|
||||
I915_WRITE(PCH_FP0(pipe), fp);
|
||||
I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
|
||||
if (!intel_crtc->no_pll) {
|
||||
if (!has_edp_encoder ||
|
||||
intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
|
||||
I915_WRITE(PCH_FP0(pipe), fp);
|
||||
I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
|
||||
|
||||
POSTING_READ(PCH_DPLL(pipe));
|
||||
udelay(150);
|
||||
}
|
||||
|
||||
/* enable transcoder DPLL */
|
||||
if (HAS_PCH_CPT(dev)) {
|
||||
temp = I915_READ(PCH_DPLL_SEL);
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
|
||||
break;
|
||||
case 1:
|
||||
temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
|
||||
break;
|
||||
case 2:
|
||||
/* FIXME: manage transcoder PLLs? */
|
||||
temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
POSTING_READ(PCH_DPLL(pipe));
|
||||
udelay(150);
|
||||
}
|
||||
} else {
|
||||
if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
|
||||
fp == I915_READ(PCH_FP0(0))) {
|
||||
intel_crtc->use_pll_a = true;
|
||||
DRM_DEBUG_KMS("using pipe a dpll\n");
|
||||
} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
|
||||
fp == I915_READ(PCH_FP0(1))) {
|
||||
intel_crtc->use_pll_a = false;
|
||||
DRM_DEBUG_KMS("using pipe b dpll\n");
|
||||
} else {
|
||||
DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
I915_WRITE(PCH_DPLL_SEL, temp);
|
||||
|
||||
POSTING_READ(PCH_DPLL_SEL);
|
||||
udelay(150);
|
||||
}
|
||||
|
||||
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
|
||||
|
@ -5493,17 +5627,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
|||
if (is_lvds) {
|
||||
temp = I915_READ(PCH_LVDS);
|
||||
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
|
||||
if (pipe == 1) {
|
||||
if (HAS_PCH_CPT(dev))
|
||||
temp |= PORT_TRANS_B_SEL_CPT;
|
||||
else
|
||||
temp |= LVDS_PIPEB_SELECT;
|
||||
} else {
|
||||
if (HAS_PCH_CPT(dev))
|
||||
temp &= ~PORT_TRANS_SEL_MASK;
|
||||
else
|
||||
temp &= ~LVDS_PIPEB_SELECT;
|
||||
}
|
||||
if (HAS_PCH_CPT(dev))
|
||||
temp |= PORT_TRANS_SEL_CPT(pipe);
|
||||
else if (pipe == 1)
|
||||
temp |= LVDS_PIPEB_SELECT;
|
||||
else
|
||||
temp &= ~LVDS_PIPEB_SELECT;
|
||||
|
||||
/* set the corresponsding LVDS_BORDER bit */
|
||||
temp |= dev_priv->lvds_border_bits;
|
||||
/* Set the B0-B3 data pairs corresponding to whether we're going to
|
||||
|
@ -5553,8 +5683,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
|||
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
|
||||
}
|
||||
|
||||
if (!has_edp_encoder ||
|
||||
intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
|
||||
if (!intel_crtc->no_pll &&
|
||||
(!has_edp_encoder ||
|
||||
intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
|
||||
I915_WRITE(PCH_DPLL(pipe), dpll);
|
||||
|
||||
/* Wait for the clocks to stabilize. */
|
||||
|
@ -5570,18 +5701,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
|
|||
}
|
||||
|
||||
intel_crtc->lowfreq_avail = false;
|
||||
if (is_lvds && has_reduced_clock && i915_powersave) {
|
||||
I915_WRITE(PCH_FP1(pipe), fp2);
|
||||
intel_crtc->lowfreq_avail = true;
|
||||
if (HAS_PIPE_CXSR(dev)) {
|
||||
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
|
||||
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
|
||||
}
|
||||
} else {
|
||||
I915_WRITE(PCH_FP1(pipe), fp);
|
||||
if (HAS_PIPE_CXSR(dev)) {
|
||||
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
|
||||
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
|
||||
if (!intel_crtc->no_pll) {
|
||||
if (is_lvds && has_reduced_clock && i915_powersave) {
|
||||
I915_WRITE(PCH_FP1(pipe), fp2);
|
||||
intel_crtc->lowfreq_avail = true;
|
||||
if (HAS_PIPE_CXSR(dev)) {
|
||||
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
|
||||
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
|
||||
}
|
||||
} else {
|
||||
I915_WRITE(PCH_FP1(pipe), fp);
|
||||
if (HAS_PIPE_CXSR(dev)) {
|
||||
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
|
||||
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5884,6 +6017,31 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
|
|||
I915_WRITE(CURBASE(pipe), base);
|
||||
}
|
||||
|
||||
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int pipe = intel_crtc->pipe;
|
||||
bool visible = base != 0;
|
||||
|
||||
if (intel_crtc->cursor_visible != visible) {
|
||||
uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
|
||||
if (base) {
|
||||
cntl &= ~CURSOR_MODE;
|
||||
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
|
||||
} else {
|
||||
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
|
||||
cntl |= CURSOR_MODE_DISABLE;
|
||||
}
|
||||
I915_WRITE(CURCNTR_IVB(pipe), cntl);
|
||||
|
||||
intel_crtc->cursor_visible = visible;
|
||||
}
|
||||
/* and commit changes on next vblank */
|
||||
I915_WRITE(CURBASE_IVB(pipe), base);
|
||||
}
|
||||
|
||||
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
|
||||
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
|
||||
bool on)
|
||||
|
@ -5931,11 +6089,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
|
|||
if (!visible && !intel_crtc->cursor_visible)
|
||||
return;
|
||||
|
||||
I915_WRITE(CURPOS(pipe), pos);
|
||||
if (IS_845G(dev) || IS_I865G(dev))
|
||||
i845_update_cursor(crtc, base);
|
||||
else
|
||||
i9xx_update_cursor(crtc, base);
|
||||
if (IS_IVYBRIDGE(dev)) {
|
||||
I915_WRITE(CURPOS_IVB(pipe), pos);
|
||||
ivb_update_cursor(crtc, base);
|
||||
} else {
|
||||
I915_WRITE(CURPOS(pipe), pos);
|
||||
if (IS_845G(dev) || IS_I865G(dev))
|
||||
i845_update_cursor(crtc, base);
|
||||
else
|
||||
i9xx_update_cursor(crtc, base);
|
||||
}
|
||||
|
||||
if (visible)
|
||||
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
|
||||
|
@ -7197,6 +7360,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
|
|||
intel_crtc->bpp = 24; /* default for pre-Ironlake */
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
if (pipe == 2 && IS_IVYBRIDGE(dev))
|
||||
intel_crtc->no_pll = true;
|
||||
intel_helper_funcs.prepare = ironlake_crtc_prepare;
|
||||
intel_helper_funcs.commit = ironlake_crtc_commit;
|
||||
} else {
|
||||
|
@ -7376,6 +7541,9 @@ static void intel_setup_outputs(struct drm_device *dev)
|
|||
|
||||
/* disable all the possible outputs/crtcs before entering KMS mode */
|
||||
drm_helper_disable_unused_functions(dev);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
ironlake_init_pch_refclk(dev);
|
||||
}
|
||||
|
||||
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
|
@ -7620,6 +7788,10 @@ void gen6_disable_rps(struct drm_device *dev)
|
|||
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
|
||||
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
|
||||
I915_WRITE(GEN6_PMIER, 0);
|
||||
/* Complete PM interrupt masking here doesn't race with the rps work
|
||||
* item again unmasking PM interrupts because that is using a different
|
||||
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
|
||||
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
|
||||
|
||||
spin_lock_irq(&dev_priv->rps_lock);
|
||||
dev_priv->pm_iir = 0;
|
||||
|
@ -8617,6 +8789,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
|
|||
* enqueue unpin/hotplug work. */
|
||||
drm_irq_uninstall(dev);
|
||||
cancel_work_sync(&dev_priv->hotplug_work);
|
||||
cancel_work_sync(&dev_priv->rps_work);
|
||||
|
||||
/* flush any delayed tasks or pending work */
|
||||
flush_scheduled_work();
|
||||
|
|
|
@ -36,7 +36,7 @@
|
|||
#include "i915_drv.h"
|
||||
#include "drm_dp_helper.h"
|
||||
|
||||
|
||||
#define DP_RECEIVER_CAP_SIZE 0xf
|
||||
#define DP_LINK_STATUS_SIZE 6
|
||||
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
|
||||
|
||||
|
@ -53,12 +53,21 @@ struct intel_dp {
|
|||
int dpms_mode;
|
||||
uint8_t link_bw;
|
||||
uint8_t lane_count;
|
||||
uint8_t dpcd[8];
|
||||
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
|
||||
struct i2c_adapter adapter;
|
||||
struct i2c_algo_dp_aux_data algo;
|
||||
bool is_pch_edp;
|
||||
uint8_t train_set[4];
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE];
|
||||
int panel_power_up_delay;
|
||||
int panel_power_down_delay;
|
||||
int panel_power_cycle_delay;
|
||||
int backlight_on_delay;
|
||||
int backlight_off_delay;
|
||||
struct drm_display_mode *panel_fixed_mode; /* for eDP */
|
||||
struct delayed_work panel_vdd_work;
|
||||
bool want_panel_vdd;
|
||||
unsigned long panel_off_jiffies;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -86,6 +95,17 @@ static bool is_pch_edp(struct intel_dp *intel_dp)
|
|||
return intel_dp->is_pch_edp;
|
||||
}
|
||||
|
||||
/**
|
||||
* is_cpu_edp - is the port on the CPU and attached to an eDP panel?
|
||||
* @intel_dp: DP struct
|
||||
*
|
||||
* Returns true if the given DP struct corresponds to a CPU eDP port.
|
||||
*/
|
||||
static bool is_cpu_edp(struct intel_dp *intel_dp)
|
||||
{
|
||||
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
|
||||
}
|
||||
|
||||
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
|
||||
{
|
||||
return container_of(encoder, struct intel_dp, base.base);
|
||||
|
@ -175,9 +195,25 @@ intel_dp_link_clock(uint8_t link_bw)
|
|||
return 162000;
|
||||
}
|
||||
|
||||
/* I think this is a fiction */
|
||||
/*
|
||||
* The units on the numbers in the next two are... bizarre. Examples will
|
||||
* make it clearer; this one parallels an example in the eDP spec.
|
||||
*
|
||||
* intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
|
||||
*
|
||||
* 270000 * 1 * 8 / 10 == 216000
|
||||
*
|
||||
* The actual data capacity of that configuration is 2.16Gbit/s, so the
|
||||
* units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
|
||||
* or equivalently, kilopixels per second - so for 1680x1050R it'd be
|
||||
* 119000. At 18bpp that's 2142000 kilobits per second.
|
||||
*
|
||||
* Thus the strange-looking division by 10 in intel_dp_link_required, to
|
||||
* get the result in decakilobits instead of kilobits.
|
||||
*/
|
||||
|
||||
static int
|
||||
intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
|
||||
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
|
||||
{
|
||||
struct drm_crtc *crtc = intel_dp->base.base.crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
@ -186,7 +222,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
|
|||
if (intel_crtc)
|
||||
bpp = intel_crtc->bpp;
|
||||
|
||||
return (pixel_clock * bpp + 7) / 8;
|
||||
return (pixel_clock * bpp + 9) / 10;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -200,24 +236,19 @@ intel_dp_mode_valid(struct drm_connector *connector,
|
|||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
|
||||
int max_lanes = intel_dp_max_lane_count(intel_dp);
|
||||
|
||||
if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
|
||||
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
|
||||
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
|
||||
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
|
||||
return MODE_PANEL;
|
||||
|
||||
if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
|
||||
if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
|
||||
return MODE_PANEL;
|
||||
}
|
||||
|
||||
/* only refuse the mode on non eDP since we have seen some weird eDP panels
|
||||
which are outside spec tolerances but somehow work by magic */
|
||||
if (!is_edp(intel_dp) &&
|
||||
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
|
||||
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
|
||||
if (intel_dp_link_required(intel_dp, mode->clock)
|
||||
> intel_dp_max_data_rate(max_link_clock, max_lanes))
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
if (mode->clock < 10000)
|
||||
|
@ -279,6 +310,38 @@ intel_hrawclk(struct drm_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
|
||||
}
|
||||
|
||||
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_check_edp(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
|
||||
WARN(1, "eDP powered off while attempting aux channel communication.\n");
|
||||
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
|
||||
I915_READ(PCH_PP_STATUS),
|
||||
I915_READ(PCH_PP_CONTROL));
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
uint8_t *send, int send_bytes,
|
||||
|
@ -295,6 +358,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
|||
uint32_t aux_clock_divider;
|
||||
int try, precharge;
|
||||
|
||||
intel_dp_check_edp(intel_dp);
|
||||
/* The clock divider is based off the hrawclk,
|
||||
* and would like to run at 2MHz. So, take the
|
||||
* hrawclk value and divide by 2 and use that
|
||||
|
@ -302,7 +366,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
|||
* Note that PCH attached eDP panels should use a 125MHz input
|
||||
* clock divider.
|
||||
*/
|
||||
if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
|
||||
if (is_cpu_edp(intel_dp)) {
|
||||
if (IS_GEN6(dev))
|
||||
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
|
||||
else
|
||||
|
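As a rough illustration of the divider rule in the comment above (the AUX channel wants roughly a 2MHz clock, so the divider is the source clock divided by two), here is a standalone sketch. It is not driver code; the clock figures are only the ones mentioned in the surrounding comments, and the 125MHz result is just the same arithmetic applied to the PCH note.

#include <stdio.h>

/* divider such that source_clock / divider is about 2MHz */
static int aux_clock_divider(int source_clock_mhz)
{
	return source_clock_mhz / 2;
}

int main(void)
{
	printf("hrawclk 200MHz -> divider %d\n", aux_clock_divider(200));	/* 100 */
	printf("SNB CPU eDP 400MHz -> divider %d\n", aux_clock_divider(400));	/* 200, as in the hunk above */
	printf("PCH eDP 125MHz -> divider %d\n", aux_clock_divider(125));	/* 62 */
	return 0;
}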
@ -408,6 +472,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
|
|||
int msg_bytes;
|
||||
uint8_t ack;
|
||||
|
||||
intel_dp_check_edp(intel_dp);
|
||||
if (send_bytes > 16)
|
||||
return -1;
|
||||
msg[0] = AUX_NATIVE_WRITE << 4;
|
||||
|
@ -450,6 +515,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
|
|||
uint8_t ack;
|
||||
int ret;
|
||||
|
||||
intel_dp_check_edp(intel_dp);
|
||||
msg[0] = AUX_NATIVE_READ << 4;
|
||||
msg[1] = address >> 8;
|
||||
msg[2] = address & 0xff;
|
||||
|
@ -493,6 +559,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
|
|||
int reply_bytes;
|
||||
int ret;
|
||||
|
||||
intel_dp_check_edp(intel_dp);
|
||||
/* Set up the command byte */
|
||||
if (mode & MODE_I2C_READ)
|
||||
msg[0] = AUX_I2C_READ << 4;
|
||||
|
@ -573,10 +640,15 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
|
|||
return -EREMOTEIO;
|
||||
}
|
||||
|
||||
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
|
||||
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
|
||||
|
||||
static int
|
||||
intel_dp_i2c_init(struct intel_dp *intel_dp,
|
||||
struct intel_connector *intel_connector, const char *name)
|
||||
{
|
||||
int ret;
|
||||
|
||||
DRM_DEBUG_KMS("i2c_init %s\n", name);
|
||||
intel_dp->algo.running = false;
|
||||
intel_dp->algo.address = 0;
|
||||
|
@ -590,7 +662,10 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
|
|||
intel_dp->adapter.algo_data = &intel_dp->algo;
|
||||
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
|
||||
|
||||
return i2c_dp_aux_add_bus(&intel_dp->adapter);
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -598,29 +673,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
|||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
int lane_count, clock;
|
||||
int max_lane_count = intel_dp_max_lane_count(intel_dp);
|
||||
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
|
||||
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
|
||||
|
||||
if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
|
||||
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
|
||||
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
|
||||
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
|
||||
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
|
||||
mode, adjusted_mode);
|
||||
/*
|
||||
* the mode->clock is used to calculate the Data&Link M/N
|
||||
* of the pipe. For the eDP the fixed clock should be used.
|
||||
*/
|
||||
mode->clock = dev_priv->panel_fixed_mode->clock;
|
||||
mode->clock = intel_dp->panel_fixed_mode->clock;
|
||||
}
|
||||
|
||||
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
|
||||
for (clock = 0; clock <= max_clock; clock++) {
|
||||
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
|
||||
|
||||
if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
|
||||
if (intel_dp_link_required(intel_dp, mode->clock)
|
||||
<= link_avail) {
|
||||
intel_dp->link_bw = bws[clock];
|
||||
intel_dp->lane_count = lane_count;
|
||||
|
@ -634,19 +708,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
|||
}
|
||||
}
|
||||
|
||||
if (is_edp(intel_dp)) {
|
||||
/* okay we failed just pick the highest */
|
||||
intel_dp->lane_count = max_lane_count;
|
||||
intel_dp->link_bw = bws[max_clock];
|
||||
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
|
||||
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
|
||||
"count %d clock %d\n",
|
||||
intel_dp->link_bw, intel_dp->lane_count,
|
||||
adjusted_mode->clock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -740,6 +801,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
|
|||
}
|
||||
}
|
||||
|
||||
static void ironlake_edp_pll_on(struct drm_encoder *encoder);
|
||||
static void ironlake_edp_pll_off(struct drm_encoder *encoder);
|
||||
|
||||
static void
|
||||
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
|
@ -749,6 +813,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
|||
struct drm_crtc *crtc = intel_dp->base.base.crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
/* Turn on the eDP PLL if needed */
|
||||
if (is_edp(intel_dp)) {
|
||||
if (!is_pch_edp(intel_dp))
|
||||
ironlake_edp_pll_on(encoder);
|
||||
else
|
||||
ironlake_edp_pll_off(encoder);
|
||||
}
|
||||
|
||||
intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
|
||||
intel_dp->DP |= intel_dp->color_range;
|
||||
|
||||
|
@ -757,7 +829,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
|||
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
|
||||
intel_dp->DP |= DP_SYNC_VS_HIGH;
|
||||
|
||||
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
|
||||
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
|
||||
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
|
||||
else
|
||||
intel_dp->DP |= DP_LINK_TRAIN_OFF;
|
||||
|
@ -798,7 +870,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
|||
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
|
||||
intel_dp->DP |= DP_PIPEB_SELECT;
|
||||
|
||||
if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
|
||||
if (is_cpu_edp(intel_dp)) {
|
||||
/* don't miss out required setting for eDP */
|
||||
intel_dp->DP |= DP_PLL_ENABLE;
|
||||
if (adjusted_mode->clock < 200000)
|
||||
|
@ -808,58 +880,150 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
|||
}
|
||||
}
|
||||
|
||||
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
|
||||
{
|
||||
unsigned long off_time;
|
||||
unsigned long delay;
|
||||
|
||||
DRM_DEBUG_KMS("Wait for panel power off time\n");
|
||||
|
||||
if (ironlake_edp_have_panel_power(intel_dp) ||
|
||||
ironlake_edp_have_panel_vdd(intel_dp))
|
||||
{
|
||||
DRM_DEBUG_KMS("Panel still on, no delay needed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
|
||||
if (time_after(jiffies, off_time)) {
|
||||
DRM_DEBUG_KMS("Time already passed");
|
||||
return;
|
||||
}
|
||||
delay = jiffies_to_msecs(off_time - jiffies);
|
||||
if (delay > intel_dp->panel_power_down_delay)
|
||||
delay = intel_dp->panel_power_down_delay;
|
||||
DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
|
||||
msleep(delay);
|
||||
}
|
||||
|
||||
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp;
|
||||
|
||||
/*
|
||||
* If the panel wasn't on, make sure there's not a currently
|
||||
* active PP sequence before enabling AUX VDD.
|
||||
*/
|
||||
if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
|
||||
msleep(dev_priv->panel_t3);
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
DRM_DEBUG_KMS("Turn eDP VDD on\n");
|
||||
|
||||
WARN(intel_dp->want_panel_vdd,
|
||||
"eDP VDD already requested on\n");
|
||||
|
||||
intel_dp->want_panel_vdd = true;
|
||||
if (ironlake_edp_have_panel_vdd(intel_dp)) {
|
||||
DRM_DEBUG_KMS("eDP VDD already on\n");
|
||||
return;
|
||||
}
|
||||
|
||||
ironlake_wait_panel_off(intel_dp);
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~PANEL_UNLOCK_MASK;
|
||||
pp |= PANEL_UNLOCK_REGS;
|
||||
pp |= EDP_FORCE_VDD;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
|
||||
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
|
||||
|
||||
/*
|
||||
* If the panel wasn't on, delay before accessing aux channel
|
||||
*/
|
||||
if (!ironlake_edp_have_panel_power(intel_dp)) {
|
||||
DRM_DEBUG_KMS("eDP was not running\n");
|
||||
msleep(intel_dp->panel_power_up_delay);
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
|
||||
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp;
|
||||
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~EDP_FORCE_VDD;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~PANEL_UNLOCK_MASK;
|
||||
pp |= PANEL_UNLOCK_REGS;
|
||||
pp &= ~EDP_FORCE_VDD;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
|
||||
/* Make sure sequencer is idle before allowing subsequent activity */
|
||||
msleep(dev_priv->panel_t12);
|
||||
/* Make sure sequencer is idle before allowing subsequent activity */
|
||||
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
|
||||
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
|
||||
intel_dp->panel_off_jiffies = jiffies;
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_panel_vdd_work(struct work_struct *__work)
|
||||
{
|
||||
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
|
||||
struct intel_dp, panel_vdd_work);
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ironlake_panel_vdd_off_sync(intel_dp);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
|
||||
{
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
|
||||
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
|
||||
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
|
||||
|
||||
intel_dp->want_panel_vdd = false;
|
||||
|
||||
if (sync) {
|
||||
ironlake_panel_vdd_off_sync(intel_dp);
|
||||
} else {
|
||||
/*
|
||||
* Queue the timer to fire a long
|
||||
* time from now (relative to the power down delay)
|
||||
* to keep the panel power up across a sequence of operations
|
||||
*/
|
||||
schedule_delayed_work(&intel_dp->panel_vdd_work,
|
||||
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
|
||||
}
|
||||
}
|
||||
|
||||
/* Returns true if the panel was already on when called */
|
||||
static bool ironlake_edp_panel_on(struct intel_dp *intel_dp)
|
||||
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
|
||||
|
||||
if (I915_READ(PCH_PP_STATUS) & PP_ON)
|
||||
return true;
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
if (ironlake_edp_have_panel_power(intel_dp))
|
||||
return;
|
||||
|
||||
ironlake_wait_panel_off(intel_dp);
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~PANEL_UNLOCK_MASK;
|
||||
pp |= PANEL_UNLOCK_REGS;
|
||||
|
||||
/* ILK workaround: disable reset around power sequence */
|
||||
pp &= ~PANEL_POWER_RESET;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
if (IS_GEN5(dev)) {
|
||||
/* ILK workaround: disable reset around power sequence */
|
||||
pp &= ~PANEL_POWER_RESET;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
}
|
||||
|
||||
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
|
||||
pp |= POWER_TARGET_ON;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
|
||||
|
@ -868,44 +1032,64 @@ static bool ironlake_edp_panel_on(struct intel_dp *intel_dp)
|
|||
DRM_ERROR("panel on wait timed out: 0x%08x\n",
|
||||
I915_READ(PCH_PP_STATUS));
|
||||
|
||||
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
|
||||
return false;
|
||||
if (IS_GEN5(dev)) {
|
||||
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_edp_panel_off(struct drm_device *dev)
|
||||
static void ironlake_edp_panel_off(struct drm_encoder *encoder)
|
||||
{
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
|
||||
PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
|
||||
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~PANEL_UNLOCK_MASK;
|
||||
pp |= PANEL_UNLOCK_REGS;
|
||||
|
||||
/* ILK workaround: disable reset around power sequence */
|
||||
pp &= ~PANEL_POWER_RESET;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
if (IS_GEN5(dev)) {
|
||||
/* ILK workaround: disable reset around power sequence */
|
||||
pp &= ~PANEL_POWER_RESET;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
}
|
||||
|
||||
pp &= ~POWER_TARGET_ON;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
intel_dp->panel_off_jiffies = jiffies;
|
||||
|
||||
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
|
||||
DRM_ERROR("panel off wait timed out: 0x%08x\n",
|
||||
I915_READ(PCH_PP_STATUS));
|
||||
if (IS_GEN5(dev)) {
|
||||
pp &= ~POWER_TARGET_ON;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
pp &= ~POWER_TARGET_ON;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
msleep(intel_dp->panel_power_cycle_delay);
|
||||
|
||||
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
|
||||
DRM_ERROR("panel off wait timed out: 0x%08x\n",
|
||||
I915_READ(PCH_PP_STATUS));
|
||||
|
||||
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_edp_backlight_on(struct drm_device *dev)
|
||||
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp;
|
||||
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
/*
|
||||
* If we enable the backlight right away following a panel power
|
||||
|
@ -913,21 +1097,32 @@ static void ironlake_edp_backlight_on(struct drm_device *dev)
|
|||
* link. So delay a bit to make sure the image is solid before
|
||||
* allowing it to appear.
|
||||
*/
|
||||
msleep(300);
|
||||
msleep(intel_dp->backlight_on_delay);
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~PANEL_UNLOCK_MASK;
|
||||
pp |= PANEL_UNLOCK_REGS;
|
||||
pp |= EDP_BLC_ENABLE;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
}
|
||||
|
||||
static void ironlake_edp_backlight_off(struct drm_device *dev)
|
||||
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_device *dev = intel_dp->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pp;
|
||||
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
pp = I915_READ(PCH_PP_CONTROL);
|
||||
pp &= ~PANEL_UNLOCK_MASK;
|
||||
pp |= PANEL_UNLOCK_REGS;
|
||||
pp &= ~EDP_BLC_ENABLE;
|
||||
I915_WRITE(PCH_PP_CONTROL, pp);
|
||||
POSTING_READ(PCH_PP_CONTROL);
|
||||
msleep(intel_dp->backlight_off_delay);
|
||||
}
|
||||
|
||||
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
|
||||
|
@ -990,43 +1185,39 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
|
|||
static void intel_dp_prepare(struct drm_encoder *encoder)
|
||||
{
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct drm_device *dev = encoder->dev;
|
||||
|
||||
/* Wake up the sink first */
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
|
||||
if (is_edp(intel_dp)) {
|
||||
ironlake_edp_backlight_off(dev);
|
||||
ironlake_edp_panel_off(dev);
|
||||
if (!is_pch_edp(intel_dp))
|
||||
ironlake_edp_pll_on(encoder);
|
||||
else
|
||||
ironlake_edp_pll_off(encoder);
|
||||
}
|
||||
/* Make sure the panel is off before trying to
|
||||
* change the mode
|
||||
*/
|
||||
ironlake_edp_backlight_off(intel_dp);
|
||||
intel_dp_link_down(intel_dp);
|
||||
ironlake_edp_panel_off(encoder);
|
||||
}
|
||||
|
||||
static void intel_dp_commit(struct drm_encoder *encoder)
|
||||
{
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
|
||||
|
||||
if (is_edp(intel_dp))
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
|
||||
if (is_edp(intel_dp)) {
|
||||
ironlake_edp_panel_on(intel_dp);
|
||||
ironlake_edp_panel_vdd_off(intel_dp);
|
||||
}
|
||||
ironlake_edp_panel_on(intel_dp);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, true);
|
||||
|
||||
intel_dp_complete_link_train(intel_dp);
|
||||
|
||||
if (is_edp(intel_dp))
|
||||
ironlake_edp_backlight_on(dev);
|
||||
ironlake_edp_backlight_on(intel_dp);
|
||||
|
||||
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
|
||||
|
||||
if (HAS_PCH_CPT(dev))
|
||||
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1038,28 +1229,27 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
|
|||
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
|
||||
|
||||
if (mode != DRM_MODE_DPMS_ON) {
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
if (is_edp(intel_dp))
|
||||
ironlake_edp_backlight_off(dev);
|
||||
ironlake_edp_backlight_off(intel_dp);
|
||||
intel_dp_sink_dpms(intel_dp, mode);
|
||||
intel_dp_link_down(intel_dp);
|
||||
if (is_edp(intel_dp))
|
||||
ironlake_edp_panel_off(dev);
|
||||
ironlake_edp_panel_off(encoder);
|
||||
if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
|
||||
ironlake_edp_pll_off(encoder);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
} else {
|
||||
if (is_edp(intel_dp))
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
intel_dp_sink_dpms(intel_dp, mode);
|
||||
if (!(dp_reg & DP_PORT_EN)) {
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
if (is_edp(intel_dp)) {
|
||||
ironlake_edp_panel_on(intel_dp);
|
||||
ironlake_edp_panel_vdd_off(intel_dp);
|
||||
}
|
||||
ironlake_edp_panel_on(intel_dp);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, true);
|
||||
intel_dp_complete_link_train(intel_dp);
|
||||
}
|
||||
if (is_edp(intel_dp))
|
||||
ironlake_edp_backlight_on(dev);
|
||||
ironlake_edp_backlight_on(intel_dp);
|
||||
} else
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
ironlake_edp_backlight_on(intel_dp);
|
||||
}
|
||||
intel_dp->dpms_mode = mode;
|
||||
}
|
||||
|
@ -1368,7 +1558,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
|||
DP_LINK_CONFIGURATION_SIZE);
|
||||
|
||||
DP |= DP_PORT_EN;
|
||||
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
|
||||
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
|
||||
DP &= ~DP_LINK_TRAIN_MASK_CPT;
|
||||
else
|
||||
DP &= ~DP_LINK_TRAIN_MASK;
|
||||
|
@ -1387,7 +1577,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
|||
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
|
||||
}
|
||||
|
||||
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
|
||||
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
|
||||
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
|
||||
else
|
||||
reg = DP | DP_LINK_TRAIN_PAT_1;
|
||||
|
@ -1462,7 +1652,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
|||
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
|
||||
}
|
||||
|
||||
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
|
||||
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
|
||||
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
|
||||
else
|
||||
reg = DP | DP_LINK_TRAIN_PAT_2;
|
||||
|
@ -1503,7 +1693,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
|||
++tries;
|
||||
}
|
||||
|
||||
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
|
||||
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
|
||||
reg = DP | DP_LINK_TRAIN_OFF_CPT;
|
||||
else
|
||||
reg = DP | DP_LINK_TRAIN_OFF;
|
||||
|
@ -1533,7 +1723,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
|
|||
udelay(100);
|
||||
}
|
||||
|
||||
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
|
||||
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
|
||||
DP &= ~DP_LINK_TRAIN_MASK_CPT;
|
||||
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
|
||||
} else {
|
||||
|
@ -1582,6 +1772,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
|
|||
|
||||
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
|
||||
POSTING_READ(intel_dp->output_reg);
|
||||
msleep(intel_dp->panel_power_down_delay);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -1596,6 +1787,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = intel_dp_aux_native_read_retry(intel_dp,
|
||||
DP_DEVICE_SERVICE_IRQ_VECTOR,
|
||||
sink_irq_vector, 1);
|
||||
if (!ret)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_handle_test_request(struct intel_dp *intel_dp)
|
||||
{
|
||||
/* NAK by default */
|
||||
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
|
||||
}
|
||||
|
||||
/*
|
||||
* According to DP spec
|
||||
* 5.1.2:
|
||||
|
@ -1608,6 +1820,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
|||
static void
|
||||
intel_dp_check_link_status(struct intel_dp *intel_dp)
|
||||
{
|
||||
u8 sink_irq_vector;
|
||||
|
||||
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
|
||||
return;
|
||||
|
||||
|
@ -1626,6 +1840,20 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
|
|||
return;
|
||||
}
|
||||
|
||||
/* Try to read the source of the interrupt */
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
|
||||
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
|
||||
/* Clear interrupt source */
|
||||
intel_dp_aux_native_write_1(intel_dp,
|
||||
DP_DEVICE_SERVICE_IRQ_VECTOR,
|
||||
sink_irq_vector);
|
||||
|
||||
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
|
||||
intel_dp_handle_test_request(intel_dp);
|
||||
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
|
||||
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
|
||||
}
|
||||
|
||||
if (!intel_channel_eq_ok(intel_dp)) {
|
||||
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
|
||||
drm_get_encoder_name(&intel_dp->base.base));
|
||||
|
@ -1687,6 +1915,31 @@ g4x_dp_detect(struct intel_dp *intel_dp)
|
|||
return intel_dp_detect_dpcd(intel_dp);
|
||||
}
|
||||
|
||||
static struct edid *
|
||||
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
|
||||
{
|
||||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
struct edid *edid;
|
||||
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
edid = drm_get_edid(connector, adapter);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
return edid;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
|
||||
{
|
||||
struct intel_dp *intel_dp = intel_attached_dp(connector);
|
||||
int ret;
|
||||
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
ret = intel_ddc_get_modes(connector, adapter);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
|
||||
*
|
||||
|
@ -1719,7 +1972,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
|
|||
if (intel_dp->force_audio) {
|
||||
intel_dp->has_audio = intel_dp->force_audio > 0;
|
||||
} else {
|
||||
edid = drm_get_edid(connector, &intel_dp->adapter);
|
||||
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
|
||||
if (edid) {
|
||||
intel_dp->has_audio = drm_detect_monitor_audio(edid);
|
||||
connector->display_info.raw_edid = NULL;
|
||||
|
@ -1740,28 +1993,36 @@ static int intel_dp_get_modes(struct drm_connector *connector)
|
|||
/* We should parse the EDID data and find out if it has an audio sink
|
||||
*/
|
||||
|
||||
ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
|
||||
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
|
||||
if (ret) {
|
||||
if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
|
||||
if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
|
||||
struct drm_display_mode *newmode;
|
||||
list_for_each_entry(newmode, &connector->probed_modes,
|
||||
head) {
|
||||
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
|
||||
dev_priv->panel_fixed_mode =
|
||||
if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
|
||||
intel_dp->panel_fixed_mode =
|
||||
drm_mode_duplicate(dev, newmode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* if eDP has no EDID, try to use fixed panel mode from VBT */
|
||||
if (is_edp(intel_dp)) {
|
||||
if (dev_priv->panel_fixed_mode != NULL) {
|
||||
/* initialize panel mode from VBT if available for eDP */
|
||||
if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
|
||||
intel_dp->panel_fixed_mode =
|
||||
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
|
||||
if (intel_dp->panel_fixed_mode) {
|
||||
intel_dp->panel_fixed_mode->type |=
|
||||
DRM_MODE_TYPE_PREFERRED;
|
||||
}
|
||||
}
|
||||
if (intel_dp->panel_fixed_mode) {
|
||||
struct drm_display_mode *mode;
|
||||
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
|
||||
mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
return 1;
|
||||
}
|
||||
|
@ -1776,7 +2037,7 @@ intel_dp_detect_audio(struct drm_connector *connector)
|
|||
struct edid *edid;
|
||||
bool has_audio = false;
|
||||
|
||||
edid = drm_get_edid(connector, &intel_dp->adapter);
|
||||
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
|
||||
if (edid) {
|
||||
has_audio = drm_detect_monitor_audio(edid);
|
||||
|
||||
|
@ -1861,6 +2122,10 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
|
|||
|
||||
i2c_del_adapter(&intel_dp->adapter);
|
||||
drm_encoder_cleanup(encoder);
|
||||
if (is_edp(intel_dp)) {
|
||||
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
|
||||
ironlake_panel_vdd_off_sync(intel_dp);
|
||||
}
|
||||
kfree(intel_dp);
|
||||
}
|
||||
|
||||
|
@ -1997,10 +2262,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
|
|||
else if (output_reg == DP_D || output_reg == PCH_DP_D)
|
||||
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
|
||||
|
||||
if (is_edp(intel_dp))
|
||||
if (is_edp(intel_dp)) {
|
||||
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
|
||||
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
|
||||
ironlake_panel_vdd_work);
|
||||
}
|
||||
|
||||
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
|
||||
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
|
||||
connector->interlace_allowed = true;
|
||||
connector->doublescan_allowed = 0;
|
||||
|
||||
|
@ -2036,25 +2304,60 @@ intel_dp_init(struct drm_device *dev, int output_reg)
|
|||
break;
|
||||
}
|
||||
|
||||
intel_dp_i2c_init(intel_dp, intel_connector, name);
|
||||
|
||||
/* Cache some DPCD data in the eDP case */
|
||||
if (is_edp(intel_dp)) {
|
||||
bool ret;
|
||||
u32 pp_on, pp_div;
|
||||
struct edp_power_seq cur, vbt;
|
||||
u32 pp_on, pp_off, pp_div;
|
||||
|
||||
pp_on = I915_READ(PCH_PP_ON_DELAYS);
|
||||
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
|
||||
pp_div = I915_READ(PCH_PP_DIVISOR);
|
||||
|
||||
/* Get T3 & T12 values (note: VESA not bspec terminology) */
|
||||
dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
|
||||
dev_priv->panel_t3 /= 10; /* t3 in 100us units */
|
||||
dev_priv->panel_t12 = pp_div & 0xf;
|
||||
dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
|
||||
/* Pull timing values out of registers */
|
||||
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
|
||||
PANEL_POWER_UP_DELAY_SHIFT;
|
||||
|
||||
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
|
||||
PANEL_LIGHT_ON_DELAY_SHIFT;
|
||||
|
||||
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
|
||||
PANEL_LIGHT_OFF_DELAY_SHIFT;
|
||||
|
||||
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
|
||||
PANEL_POWER_DOWN_DELAY_SHIFT;
|
||||
|
||||
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
|
||||
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
|
||||
|
||||
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
|
||||
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
|
||||
|
||||
vbt = dev_priv->edp.pps;
|
||||
|
||||
DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
|
||||
vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
|
||||
|
||||
#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
|
||||
|
||||
intel_dp->panel_power_up_delay = get_delay(t1_t3);
|
||||
intel_dp->backlight_on_delay = get_delay(t8);
|
||||
intel_dp->backlight_off_delay = get_delay(t9);
|
||||
intel_dp->panel_power_down_delay = get_delay(t10);
|
||||
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
|
||||
|
||||
DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
|
||||
intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
|
||||
intel_dp->panel_power_cycle_delay);
|
||||
|
||||
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
|
||||
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
|
||||
|
||||
intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
|
||||
|
||||
ironlake_edp_panel_vdd_on(intel_dp);
|
||||
ret = intel_dp_get_dpcd(intel_dp);
|
||||
ironlake_edp_panel_vdd_off(intel_dp);
|
||||
ironlake_edp_panel_vdd_off(intel_dp, false);
|
||||
if (ret) {
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
|
||||
dev_priv->no_aux_handshake =
|
||||
|
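The power sequencing hunk above merges each eDP timing (T1/T3, T8, T9, T10, T11/T12) from two sources, the PP registers and the VBT, via get_delay(): take the larger value and round the 100us units up to milliseconds. Below is a standalone sketch of just that arithmetic, not driver code, with made-up sample values and a local MAX() in place of the kernel's max().

#include <stdio.h>

struct edp_power_seq { int t1_t3, t8, t9, t10, t11_t12; };	/* 100us units */

#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* larger of the register and VBT values, rounded up to milliseconds */
#define get_delay(field) ((MAX(cur.field, vbt.field) + 9) / 10)

int main(void)
{
	struct edp_power_seq cur = { .t1_t3 = 2100, .t11_t12 = 6000 };	/* made-up register values */
	struct edp_power_seq vbt = { .t1_t3 = 400,  .t11_t12 = 5000 };	/* made-up VBT values */

	printf("panel_power_up_delay    = %d ms\n", get_delay(t1_t3));		/* 210 */
	printf("panel_power_cycle_delay = %d ms\n", get_delay(t11_t12));	/* 600 */
	return 0;
}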
@ -2069,18 +2372,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
|
|||
}
|
||||
}
|
||||
|
||||
intel_dp_i2c_init(intel_dp, intel_connector, name);
|
||||
|
||||
intel_encoder->hot_plug = intel_dp_hot_plug;
|
||||
|
||||
if (is_edp(intel_dp)) {
|
||||
/* initialize panel mode from VBT if available for eDP */
|
||||
if (dev_priv->lfp_lvds_vbt_mode) {
|
||||
dev_priv->panel_fixed_mode =
|
||||
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
|
||||
if (dev_priv->panel_fixed_mode) {
|
||||
dev_priv->panel_fixed_mode->type |=
|
||||
DRM_MODE_TYPE_PREFERRED;
|
||||
}
|
||||
}
|
||||
dev_priv->int_edp_connector = connector;
|
||||
intel_panel_setup_backlight(dev);
|
||||
}
|
||||
|
|
|
@@ -171,6 +171,9 @@ struct intel_crtc {
int16_t cursor_width, cursor_height;
bool cursor_visible;
unsigned int bpp;

bool no_pll; /* tertiary pipe for IVB */
bool use_pll_a;
};

#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)

@@ -184,7 +187,7 @@ struct intel_crtc {
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13

#define DIP_TYPE_SPD 0x3
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
#define DIP_LEN_SPD 25
#define DIP_SPD_UNKNOWN 0

@@ -379,4 +382,6 @@ extern void intel_fb_restore_mode(struct drm_device *dev);
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);

#endif /* __INTEL_DRV_H__ */
@@ -69,8 +69,7 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0;
frame->ecc = 0;

/* Header isn't part of the checksum */
for (i = 5; i < frame->len; i++)
for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
sum += data[i];

frame->checksum = 0x100 - sum;
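The change above makes the checksum cover the DIP header bytes as well as the payload, so every byte of the infoframe (header, payload and checksum) sums to zero modulo 256. Here is a standalone sketch of that property using a flat toy buffer rather than the driver's struct dip_infoframe; DIP_HEADER_SIZE = 5 is assumed (consistent with the old loop starting at byte 5), and the header bytes reuse the SPD type/version/length constants from the header file hunk earlier in this diff.

#include <stdint.h>
#include <stdio.h>

#define DIP_HEADER_SIZE 5	/* assumed; matches the old loop starting at byte 5 */

/* checksum over header + payload so the whole frame sums to 0 (mod 256) */
static uint8_t dip_checksum(const uint8_t *data, int payload_len)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < payload_len + DIP_HEADER_SIZE; i++)
		sum += data[i];

	return 0x100 - sum;
}

int main(void)
{
	uint8_t frame[DIP_HEADER_SIZE + 25] = { 0x83, 0x1, 25 };	/* SPD-style header, zero payload */
	uint8_t csum = dip_checksum(frame, 25);
	uint8_t total = csum;
	int i;

	for (i = 0; i < DIP_HEADER_SIZE + 25; i++)
		total += frame[i];

	printf("checksum 0x%02x, all bytes mod 256 = %u\n", csum, (unsigned)total);
	return 0;
}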
@ -104,7 +103,7 @@ static u32 intel_infoframe_flags(struct dip_infoframe *frame)
|
|||
flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
|
||||
break;
|
||||
case DIP_TYPE_SPD:
|
||||
flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
|
||||
flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
|
||||
|
@ -165,9 +164,9 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
|
|||
|
||||
flags = intel_infoframe_index(frame);
|
||||
|
||||
val &= ~VIDEO_DIP_SELECT_MASK;
|
||||
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
|
||||
|
||||
I915_WRITE(reg, val | flags);
|
||||
I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
|
||||
|
||||
for (i = 0; i < len; i += 4) {
|
||||
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
|
||||
|
@ -252,12 +251,10 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
|
|||
intel_write_eld(encoder, adjusted_mode);
|
||||
}
|
||||
|
||||
if (intel_crtc->pipe == 1) {
|
||||
if (HAS_PCH_CPT(dev))
|
||||
sdvox |= PORT_TRANS_B_SEL_CPT;
|
||||
else
|
||||
sdvox |= SDVO_PIPE_B_SELECT;
|
||||
}
|
||||
if (HAS_PCH_CPT(dev))
|
||||
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
|
||||
else if (intel_crtc->pipe == 1)
|
||||
sdvox |= SDVO_PIPE_B_SELECT;
|
||||
|
||||
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
|
||||
POSTING_READ(intel_hdmi->sdvox_reg);
|
||||
|
@ -489,6 +486,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
|
|||
struct intel_encoder *intel_encoder;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_hdmi *intel_hdmi;
|
||||
int i;
|
||||
|
||||
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
|
||||
if (!intel_hdmi)
|
||||
|
@ -514,7 +512,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
|
|||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
connector->interlace_allowed = 0;
|
||||
connector->doublescan_allowed = 0;
|
||||
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
|
||||
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
|
||||
|
||||
/* Set up the DDC bus. */
|
||||
if (sdvox_reg == SDVOB) {
|
||||
|
@ -541,10 +539,14 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
|
|||
|
||||
intel_hdmi->sdvox_reg = sdvox_reg;
|
||||
|
||||
if (!HAS_PCH_SPLIT(dev))
|
||||
if (!HAS_PCH_SPLIT(dev)) {
|
||||
intel_hdmi->write_infoframe = i9xx_write_infoframe;
|
||||
else
|
||||
I915_WRITE(VIDEO_DIP_CTL, 0);
|
||||
} else {
|
||||
intel_hdmi->write_infoframe = ironlake_write_infoframe;
|
||||
for_each_pipe(i)
|
||||
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
|
||||
}
|
||||
|
||||
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
|
||||
|
||||
|
|
|
@@ -422,13 +422,7 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);

/* speed:
* 0x0 = 100 KHz
* 0x1 = 50 KHz
* 0x2 = 400 KHz
* 0x3 = 1000 Khz
*/
bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
}

void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
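The change above drops the extra shift because callers now pass a rate value that already sits in the bits 9:8 speed field of reg0 (the sdvo hunk later in this diff passes GMBUS_RATE_1MHZ directly). A standalone sketch of the field update follows; it is not driver code, the register value is made up, and the pre-shifted encoding of the constant is an assumption rather than a quote from i915_reg.h.

#include <stdint.h>
#include <stdio.h>

#define GMBUS_RATE_MASK	(0x3u << 8)	/* speed field, bits 9:8 of reg0 */
#define GMBUS_RATE_1MHZ	(0x3u << 8)	/* assumed pre-shifted encoding of 0x3 = 1000 KHz */

/* replace the speed field; 'speed' is expected to be pre-shifted */
static uint32_t gmbus_set_speed(uint32_t reg0, uint32_t speed)
{
	return (reg0 & ~GMBUS_RATE_MASK) | speed;
}

int main(void)
{
	uint32_t reg0 = 0x105;	/* made-up value with the speed field currently 0x1 (50 KHz) */

	printf("reg0 0x%03x -> 0x%03x\n", reg0, gmbus_set_speed(reg0, GMBUS_RATE_1MHZ));
	return 0;
}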
@ -888,9 +888,11 @@ bool intel_lvds_init(struct drm_device *dev)
|
|||
intel_encoder->type = INTEL_OUTPUT_LVDS;
|
||||
|
||||
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
|
||||
intel_encoder->crtc_mask = (1 << 1);
|
||||
if (INTEL_INFO(dev)->gen >= 5)
|
||||
intel_encoder->crtc_mask |= (1 << 0);
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
|
||||
else
|
||||
intel_encoder->crtc_mask = (1 << 1);
|
||||
|
||||
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
|
||||
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
|
||||
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
|
||||
|
|
|
@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
|
|||
I915_WRITE(BLC_PWM_CPU_CTL, val | level);
|
||||
}
|
||||
|
||||
void intel_panel_set_backlight(struct drm_device *dev, u32 level)
|
||||
static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 tmp;
|
||||
|
@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
|
|||
I915_WRITE(BLC_PWM_CTL, tmp | level);
|
||||
}
|
||||
|
||||
void intel_panel_set_backlight(struct drm_device *dev, u32 level)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
dev_priv->backlight_level = level;
|
||||
if (dev_priv->backlight_enabled)
|
||||
intel_panel_actually_set_backlight(dev, level);
|
||||
}
|
||||
|
||||
void intel_panel_disable_backlight(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->backlight_enabled) {
|
||||
dev_priv->backlight_level = intel_panel_get_backlight(dev);
|
||||
dev_priv->backlight_enabled = false;
|
||||
}
|
||||
|
||||
intel_panel_set_backlight(dev, 0);
|
||||
dev_priv->backlight_enabled = false;
|
||||
intel_panel_actually_set_backlight(dev, 0);
|
||||
}
|
||||
|
||||
void intel_panel_enable_backlight(struct drm_device *dev)
|
||||
|
@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev)
|
|||
if (dev_priv->backlight_level == 0)
|
||||
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
|
||||
|
||||
intel_panel_set_backlight(dev, dev_priv->backlight_level);
|
||||
dev_priv->backlight_enabled = true;
|
||||
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
|
||||
}
|
||||
|
||||
static void intel_panel_init_backlight(struct drm_device *dev)
|
||||
|
|
|
@ -34,6 +34,16 @@
|
|||
#include "i915_trace.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
/*
|
||||
* 965+ support PIPE_CONTROL commands, which provide finer grained control
|
||||
* over cache flushing.
|
||||
*/
|
||||
struct pipe_control {
|
||||
struct drm_i915_gem_object *obj;
|
||||
volatile u32 *cpu_page;
|
||||
u32 gtt_offset;
|
||||
};
|
||||
|
||||
static inline int ring_space(struct intel_ring_buffer *ring)
|
||||
{
|
||||
int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
|
||||
|
@ -123,6 +133,118 @@ render_ring_flush(struct intel_ring_buffer *ring,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
|
||||
* implementing two workarounds on gen6. From section 1.4.7.1
|
||||
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
|
||||
*
|
||||
* [DevSNB-C+{W/A}] Before any depth stall flush (including those
|
||||
* produced by non-pipelined state commands), software needs to first
|
||||
* send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
|
||||
* 0.
|
||||
*
|
||||
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
|
||||
* =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
|
||||
*
|
||||
* And the workaround for these two requires this workaround first:
|
||||
*
|
||||
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
|
||||
* BEFORE the pipe-control with a post-sync op and no write-cache
|
||||
* flushes.
|
||||
*
|
||||
* And this last workaround is tricky because of the requirements on
|
||||
* that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
|
||||
* volume 2 part 1:
|
||||
*
|
||||
* "1 of the following must also be set:
|
||||
* - Render Target Cache Flush Enable ([12] of DW1)
|
||||
* - Depth Cache Flush Enable ([0] of DW1)
|
||||
* - Stall at Pixel Scoreboard ([1] of DW1)
|
||||
* - Depth Stall ([13] of DW1)
|
||||
* - Post-Sync Operation ([13] of DW1)
|
||||
* - Notify Enable ([8] of DW1)"
|
||||
*
|
||||
* The cache flushes require the workaround flush that triggered this
|
||||
* one, so we can't use it. Depth stall would trigger the same.
|
||||
* Post-sync nonzero is what triggered this second workaround, so we
|
||||
* can't use that one either. Notify enable is IRQs, which aren't
|
||||
* really our business. That leaves only stall at scoreboard.
|
||||
*/
|
||||
static int
|
||||
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct pipe_control *pc = ring->private;
|
||||
u32 scratch_addr = pc->gtt_offset + 128;
|
||||
int ret;
|
||||
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
|
||||
intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
|
||||
PIPE_CONTROL_STALL_AT_SCOREBOARD);
|
||||
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
|
||||
intel_ring_emit(ring, 0); /* low dword */
|
||||
intel_ring_emit(ring, 0); /* high dword */
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
|
||||
intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
|
||||
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gen6_render_ring_flush(struct intel_ring_buffer *ring,
|
||||
u32 invalidate_domains, u32 flush_domains)
|
||||
{
|
||||
u32 flags = 0;
|
||||
struct pipe_control *pc = ring->private;
|
||||
u32 scratch_addr = pc->gtt_offset + 128;
|
||||
int ret;
|
||||
|
||||
/* Force SNB workarounds for PIPE_CONTROL flushes */
|
||||
intel_emit_post_sync_nonzero_flush(ring);
|
||||
|
||||
/* Just flush everything. Experiments have shown that reducing the
|
||||
* number of bits based on the write domains has little performance
|
||||
* impact.
|
||||
*/
|
||||
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
|
||||
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
|
||||
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
|
||||
intel_ring_emit(ring, flags);
|
||||
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring, 0); /* lower dword */
|
||||
intel_ring_emit(ring, 0); /* upper dword */
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ring_write_tail(struct intel_ring_buffer *ring,
|
||||
u32 value)
|
||||
{
|
||||
|
@ -206,16 +328,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* 965+ support PIPE_CONTROL commands, which provide finer grained control
|
||||
* over cache flushing.
|
||||
*/
|
||||
struct pipe_control {
|
||||
struct drm_i915_gem_object *obj;
|
||||
volatile u32 *cpu_page;
|
||||
u32 gtt_offset;
|
||||
};
|
||||
|
||||
static int
|
||||
init_pipe_control(struct intel_ring_buffer *ring)
|
||||
{
|
||||
|
@ -296,8 +408,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
|
|||
GFX_MODE_ENABLE(GFX_REPLAY_MODE));
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
} else if (IS_GEN5(dev)) {
|
||||
if (INTEL_INFO(dev)->gen >= 5) {
|
||||
ret = init_pipe_control(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -438,8 +549,8 @@ gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
|
|||
|
||||
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
|
||||
do { \
|
||||
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
|
||||
PIPE_CONTROL_DEPTH_STALL | 2); \
|
||||
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
|
||||
PIPE_CONTROL_DEPTH_STALL); \
|
||||
intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
|
||||
intel_ring_emit(ring__, 0); \
|
||||
intel_ring_emit(ring__, 0); \
|
||||
|
@ -467,8 +578,9 @@ pc_render_add_request(struct intel_ring_buffer *ring,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
|
||||
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
|
||||
PIPE_CONTROL_WRITE_FLUSH |
|
||||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
|
||||
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring, seqno);
|
||||
intel_ring_emit(ring, 0);
|
||||
|
@ -483,8 +595,9 @@ pc_render_add_request(struct intel_ring_buffer *ring,
|
|||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128;
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
|
||||
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
|
||||
PIPE_CONTROL_WRITE_FLUSH |
|
||||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
|
||||
PIPE_CONTROL_NOTIFY);
|
||||
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring, seqno);
|
||||
|
@ -1358,6 +1471,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
|
|||
*ring = render_ring;
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
ring->add_request = gen6_add_request;
|
||||
ring->flush = gen6_render_ring_flush;
|
||||
ring->irq_get = gen6_render_ring_get_irq;
|
||||
ring->irq_put = gen6_render_ring_put_irq;
|
||||
} else if (IS_GEN5(dev)) {
|
||||
|
|
|
@ -1232,8 +1232,7 @@ static bool
|
|||
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
|
||||
{
|
||||
/* Is there more than one type of output? */
|
||||
int caps = intel_sdvo->caps.output_flags & 0xf;
|
||||
return caps & -caps;
|
||||
return hweight16(intel_sdvo->caps.output_flags) > 1;
|
||||
}
|
||||
|
||||
static struct edid *
|
||||
|
@ -1254,7 +1253,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
|
|||
}
|
||||
|
||||
enum drm_connector_status
|
||||
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
|
||||
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
|
||||
{
|
||||
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
|
||||
enum drm_connector_status status;
|
||||
|
@ -1349,7 +1348,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
|
|||
if ((intel_sdvo_connector->output_flag & response) == 0)
|
||||
ret = connector_status_disconnected;
|
||||
else if (IS_TMDS(intel_sdvo_connector))
|
||||
ret = intel_sdvo_hdmi_sink_detect(connector);
|
||||
ret = intel_sdvo_tmds_sink_detect(connector);
|
||||
else {
|
||||
struct edid *edid;
|
||||
|
||||
|
@ -1896,7 +1895,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
|
|||
struct intel_sdvo *sdvo, u32 reg)
|
||||
{
|
||||
struct sdvo_device_mapping *mapping;
|
||||
u8 pin, speed;
|
||||
u8 pin;
|
||||
|
||||
if (IS_SDVOB(reg))
|
||||
mapping = &dev_priv->sdvo_mappings[0];
|
||||
|
@ -1904,18 +1903,16 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
|
|||
mapping = &dev_priv->sdvo_mappings[1];
|
||||
|
||||
pin = GMBUS_PORT_DPB;
|
||||
speed = GMBUS_RATE_1MHZ >> 8;
|
||||
if (mapping->initialized) {
|
||||
if (mapping->initialized)
|
||||
pin = mapping->i2c_pin;
|
||||
speed = mapping->i2c_speed;
|
||||
}
|
||||
|
||||
if (pin < GMBUS_NUM_PORTS) {
|
||||
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
|
||||
intel_gmbus_set_speed(sdvo->i2c, speed);
|
||||
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
|
||||
intel_gmbus_force_bit(sdvo->i2c, true);
|
||||
} else
|
||||
} else {
|
||||
sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -2206,7 +2203,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
|
|||
bytes[0], bytes[1]);
|
||||
return false;
|
||||
}
|
||||
intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
|
||||
intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -404,6 +404,9 @@ static int dmar_forcedac;
|
|||
static int intel_iommu_strict;
|
||||
static int intel_iommu_superpage = 1;
|
||||
|
||||
int intel_iommu_gfx_mapped;
|
||||
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
|
||||
|
||||
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
|
||||
static DEFINE_SPINLOCK(device_domain_lock);
|
||||
static LIST_HEAD(device_domain_list);
|
||||
|
@ -3226,9 +3229,6 @@ static void __init init_no_remapping_devices(void)
|
|||
}
|
||||
}
|
||||
|
||||
if (dmar_map_gfx)
|
||||
return;
|
||||
|
||||
for_each_drhd_unit(drhd) {
|
||||
int i;
|
||||
if (drhd->ignored || drhd->include_all)
|
||||
|
@ -3236,18 +3236,23 @@ static void __init init_no_remapping_devices(void)
|
|||
|
||||
for (i = 0; i < drhd->devices_cnt; i++)
|
||||
if (drhd->devices[i] &&
|
||||
!IS_GFX_DEVICE(drhd->devices[i]))
|
||||
!IS_GFX_DEVICE(drhd->devices[i]))
|
||||
break;
|
||||
|
||||
if (i < drhd->devices_cnt)
|
||||
continue;
|
||||
|
||||
/* bypass IOMMU if it is just for gfx devices */
|
||||
drhd->ignored = 1;
|
||||
for (i = 0; i < drhd->devices_cnt; i++) {
|
||||
if (!drhd->devices[i])
|
||||
continue;
|
||||
drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
|
||||
/* This IOMMU has *only* gfx devices. Either bypass it or
|
||||
set the gfx_mapped flag, as appropriate */
|
||||
if (dmar_map_gfx) {
|
||||
intel_iommu_gfx_mapped = 1;
|
||||
} else {
|
||||
drhd->ignored = 1;
|
||||
for (i = 0; i < drhd->devices_cnt; i++) {
|
||||
if (!drhd->devices[i])
|
||||
continue;
|
||||
drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3950,7 +3955,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
|
|||
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
|
||||
printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
|
||||
dmar_map_gfx = 0;
|
||||
}
|
||||
} else if (dmar_map_gfx) {
|
||||
/* we have to ensure the gfx device is idle before we flush */
|
||||
printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
|
||||
intel_iommu_strict = 1;
|
||||
}
|
||||
}
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
|
||||
|
|
|
@ -74,6 +74,20 @@
|
|||
|
||||
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
|
||||
|
||||
#define DP_PSR_SUPPORT 0x070
|
||||
# define DP_PSR_IS_SUPPORTED 1
|
||||
#define DP_PSR_CAPS 0x071
|
||||
# define DP_PSR_NO_TRAIN_ON_EXIT 1
|
||||
# define DP_PSR_SETUP_TIME_330 (0 << 1)
|
||||
# define DP_PSR_SETUP_TIME_275 (1 << 1)
|
||||
# define DP_PSR_SETUP_TIME_220 (2 << 1)
|
||||
# define DP_PSR_SETUP_TIME_165 (3 << 1)
|
||||
# define DP_PSR_SETUP_TIME_110 (4 << 1)
|
||||
# define DP_PSR_SETUP_TIME_55 (5 << 1)
|
||||
# define DP_PSR_SETUP_TIME_0 (6 << 1)
|
||||
# define DP_PSR_SETUP_TIME_MASK (7 << 1)
|
||||
# define DP_PSR_SETUP_TIME_SHIFT 1
|
||||
|
||||
/* link configuration */
|
||||
#define DP_LINK_BW_SET 0x100
|
||||
# define DP_LINK_BW_1_62 0x06
|
||||
|
@ -133,6 +147,18 @@
|
|||
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
|
||||
# define DP_SET_ANSI_8B10B (1 << 0)
|
||||
|
||||
#define DP_PSR_EN_CFG 0x170
|
||||
# define DP_PSR_ENABLE (1 << 0)
|
||||
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
|
||||
# define DP_PSR_CRC_VERIFICATION (1 << 2)
|
||||
# define DP_PSR_FRAME_CAPTURE (1 << 3)
|
||||
|
||||
#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
|
||||
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
|
||||
# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
|
||||
# define DP_CP_IRQ (1 << 2)
|
||||
# define DP_SINK_SPECIFIC_IRQ (1 << 6)
|
||||
|
||||
#define DP_LANE0_1_STATUS 0x202
|
||||
#define DP_LANE2_3_STATUS 0x203
|
||||
# define DP_LANE_CR_DONE (1 << 0)
|
||||
|
@ -165,10 +191,45 @@
|
|||
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
|
||||
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
|
||||
|
||||
#define DP_TEST_REQUEST 0x218
|
||||
# define DP_TEST_LINK_TRAINING (1 << 0)
|
||||
# define DP_TEST_LINK_PATTERN (1 << 1)
|
||||
# define DP_TEST_LINK_EDID_READ (1 << 2)
|
||||
# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
|
||||
|
||||
#define DP_TEST_LINK_RATE 0x219
|
||||
# define DP_LINK_RATE_162 (0x6)
|
||||
# define DP_LINK_RATE_27 (0xa)
|
||||
|
||||
#define DP_TEST_LANE_COUNT 0x220
|
||||
|
||||
#define DP_TEST_PATTERN 0x221
|
||||
|
||||
#define DP_TEST_RESPONSE 0x260
|
||||
# define DP_TEST_ACK (1 << 0)
|
||||
# define DP_TEST_NAK (1 << 1)
|
||||
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
|
||||
|
||||
#define DP_SET_POWER 0x600
|
||||
# define DP_SET_POWER_D0 0x1
|
||||
# define DP_SET_POWER_D3 0x2
|
||||
|
||||
#define DP_PSR_ERROR_STATUS 0x2006
|
||||
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
|
||||
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
|
||||
|
||||
#define DP_PSR_ESI 0x2007
|
||||
# define DP_PSR_CAPS_CHANGE (1 << 0)
|
||||
|
||||
#define DP_PSR_STATUS 0x2008
|
||||
# define DP_PSR_SINK_INACTIVE 0
|
||||
# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
|
||||
# define DP_PSR_SINK_ACTIVE_RFB 2
|
||||
# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
|
||||
# define DP_PSR_SINK_ACTIVE_RESYNC 4
|
||||
# define DP_PSR_SINK_INTERNAL_ERROR 7
|
||||
# define DP_PSR_SINK_STATE_MASK 0x07
|
||||
|
||||
#define MODE_I2C_START 1
|
||||
#define MODE_I2C_WRITE 2
|
||||
#define MODE_I2C_READ 4
|
||||
|
|
|
@@ -13,6 +13,8 @@ const struct intel_gtt {
unsigned int gtt_mappable_entries;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
} *intel_gtt_get(void);

void intel_gtt_chipset_flush(void);
@@ -117,6 +117,8 @@ io_mapping_unmap(void __iomem *vaddr)

#else

#include <linux/uaccess.h>

/* this struct isn't actually defined anywhere */
struct io_mapping;

@@ -138,12 +140,14 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
pagefault_enable();
}

/* Non-atomic map/unmap */