Merge tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- fbc improvements when stolen memory is tight (Ben)
- cdclk handling improvements for vlv/chv (Ville)
- proper fix for stuck primary planes on gmch platforms with cxsr (Imre & Egbert Eich)
- gen8 hw semaphore support (Ben)
- more execlist prep work from Oscar Mateo
- locking fixes for primary planes (Matt Roper)
- code rework to support runtime pm for dpms on hsw/bdw (Paulo, Imre & me), but not yet enabled because some fixes from Paulo haven't made the cut
- more gpu boost tuning from Chris
- as usual piles of little things all over

* tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel: (93 commits)
  drm/i915: Make the RPS interrupt generation mask handle the vlv wa
  drm/i915: Move RPS evaluation interval counters to i915->rps
  drm/i915: Don't cast a pointer to void* unnecessarily
  drm/i915: don't read LVDS regs at compute_config time
  drm/i915: check the power domains in intel_lvds_get_hw_state()
  drm/i915: check the power domains in ironlake_get_pipe_config()
  drm/i915: don't skip shared DPLL assertion on LPT
  drm/i915: Only touch WRPLL hw state in enable/disable hooks
  drm/i915: Switch to common shared dpll framework for WRPLLs
  drm/i915: ->enable hook for WRPLLs
  drm/i915: ->disable hook for WRPLLs
  drm/i915: State readout support for WRPLLs
  drm/i915: add POWER_DOMAIN_PLLS
  drm/i915: Document that the pll->mode_set hook is optional
  drm/i915: Basic shared dpll support for WRPLLs
  drm/i915: Precompute static ddi_pll_sel values in encoders
  drm/i915: BDW also has special-purpose DP DDI clocks
  drm/i915: State readout and cross-checking for ddi_pll_sel
  drm/i915: Move ddi_pll_sel into the pipe config
  drm/i915: Add a debugfs file for the shared dpll state
  ...
This commit is contained in:
commit c51f716790

drivers/gpu/drm/i915/i915_debugfs.c
@@ -176,7 +176,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
-	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 	seq_putc(m, ' ');
 }
@@ -994,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 			i915_next_seqno_get, i915_next_seqno_set,
 			"0x%llx\n");
 
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 crstanddelay;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	crstanddelay = I915_READ16(CRSTANDVID);
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
-	return 0;
-}
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1158,61 +1135,6 @@ out:
 	return ret;
 }
 
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 delayfreq;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 0; i < 16; i++) {
-		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
-			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
-	return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 inttoext;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 1; i <= 32; i++) {
-		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
-		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
@@ -1523,10 +1445,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
-		seq_puts(m, "enabled\n");
-	else
-		seq_puts(m, "disabled\n");
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915.enable_ips));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (I915_READ(IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
+			seq_puts(m, "Currently: disabled\n");
+	}
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1630,26 +1559,6 @@ out:
 	return ret;
 }
 
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1746,7 +1655,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (ctx->obj == NULL)
+		if (ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 
 		seq_puts(m, "HW context ");
@@ -1755,7 +1664,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ", ring->name);
 
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
 		seq_putc(m, '\n');
 	}
 
@@ -1869,7 +1778,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 	if (i915_gem_context_is_default(ctx))
 		seq_puts(m, "  default context:\n");
 	else
-		seq_printf(m, "  context %d:\n", ctx->id);
+		seq_printf(m, "  context %d:\n", ctx->user_handle);
 	ppgtt->debug_dump(ppgtt, m);
 
 	return 0;
@@ -2134,6 +2043,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "VGA";
 	case POWER_DOMAIN_AUDIO:
 		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
@@ -2358,17 +2269,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		bool active;
 		int x, y;
 
-		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active));
+			   yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
 		if (crtc->active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
-			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
-				   x, y, crtc->cursor_addr,
-				   yesno(active));
+				   x, y, crtc->cursor_width, crtc->cursor_height,
+				   crtc->cursor_addr, yesno(active));
 		}
 
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2388,6 +2299,104 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int i, j, ret;
+
+	if (!i915_semaphore_is_enabled(dev)) {
+		seq_puts(m, "Semaphores are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_BROADWELL(dev)) {
+		struct page *page;
+		uint64_t *seqno;
+
+		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+		seqno = (uint64_t *)kmap_atomic(page);
+		for_each_ring(ring, dev_priv, i) {
+			uint64_t offset;
+
+			seq_printf(m, "%s\n", ring->name);
+
+			seq_puts(m, "  Last signal:");
+			for (j = 0; j < num_rings; j++) {
+				offset = i * I915_NUM_RINGS + j;
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+			seq_puts(m, "  Last wait:  ");
+			for (j = 0; j < num_rings; j++) {
+				offset = i + (j * I915_NUM_RINGS);
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+		}
+		kunmap_atomic(seqno);
+	} else {
+		seq_puts(m, "  Last signal:");
+		for_each_ring(ring, dev_priv, i)
+			for (j = 0; j < num_rings; j++)
+				seq_printf(m, "0x%08x\n",
+					   I915_READ(ring->semaphore.mbox.signal[j]));
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "\nSync seqno:\n");
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < num_rings; j++) {
+			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
+		}
+		seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+			   pll->active, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll:    0x%08x\n", pll->hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+		seq_printf(m, " fp0:     0x%08x\n", pll->hw_state.fp0);
+		seq_printf(m, " fp1:     0x%08x\n", pll->hw_state.fp1);
+		seq_printf(m, " wrpll:   0x%08x\n", pll->hw_state.wrpll);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
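Note: the IS_BROADWELL() branch of i915_semaphore_status() above dumps one shared semaphore page as a flat array of 64-bit seqnos, one slot per (signaller, waiter) ring pair. Below is a minimal userspace sketch of that index arithmetic; NUM_RINGS and the helper names are illustrative, only the offset formulas come from the two debugfs loops above.

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_RINGS 5	/* gen8: RCS, VCS, BCS, VECS, VCS2 */

	/* "Last signal" walk: the slots one signaller writes are contiguous. */
	static size_t signal_byte_offset(int signaller, int waiter)
	{
		return (size_t)(signaller * NUM_RINGS + waiter) * sizeof(uint64_t);
	}

	/* "Last wait" walk: the transposed view, one column per signaller. */
	static size_t wait_byte_offset(int waiter, int signaller)
	{
		return (size_t)(waiter + signaller * NUM_RINGS) * sizeof(uint64_t);
	}

	int main(void)
	{
		/* RCS (0) signalling VCS (1) and VCS waiting on RCS resolve to
		 * the same 8-byte slot, reached from the two different loops. */
		printf("0x%02zx == 0x%02zx\n",
		       signal_byte_offset(0, 1), wait_byte_offset(1, 0));
		return 0;
	}

Both debugfs loops print the slot value together with "offset * 8", i.e. the byte offset into the page, which is exactly what the helpers above compute.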
@@ -2860,7 +2869,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+	    !crtc->config.pch_pfit.enabled) {
+		crtc->config.pch_pfit.force_thru = true;
+
+		intel_display_power_get(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.pch_pfit.force_thru) {
+		crtc->config.pch_pfit.force_thru = false;
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+
+		intel_display_power_put(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2874,6 +2936,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
+		if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev);
+
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2906,11 +2971,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	else if (INTEL_INFO(dev)->gen < 5)
 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_VALLEYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_GEN5(dev) || IS_GEN6(dev))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
 	if (ret != 0)
 		return ret;
@@ -2962,6 +3027,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			g4x_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
 	}
 
 	return 0;
@@ -3815,14 +3882,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_delayfreq_table", i915_delayfreq_table, 0},
-	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -3839,6 +3902,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
+	{"i915_semaphore_status", i915_semaphore_status, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 

drivers/gpu/drm/i915/i915_dma.c
@@ -1593,7 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	dev->dev_private = (void *)dev_priv;
+	dev->dev_private = dev_priv;
 	dev_priv->dev = dev;
 
 	/* copy initial configuration to dev_priv->info */
@@ -1954,11 +1954,11 @@ void i915_driver_lastclose(struct drm_device *dev)
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_close(dev, file_priv);
-	i915_gem_release(dev, file_priv);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 }
 

drivers/gpu/drm/i915/i915_drv.c
@@ -477,10 +477,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -520,6 +516,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 		return error;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	intel_runtime_pm_disable_interrupts(dev);
 
 	intel_suspend_gt_powersave(dev);
@@ -541,10 +539,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	i915_save_state(dev);
 
-	if (acpi_target_system_state() >= ACPI_STATE_S3)
-		opregion_target_state = PCI_D3cold;
-	else
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
 		opregion_target_state = PCI_D1;
+#endif
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev, false);

drivers/gpu/drm/i915/i915_drv.h
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -184,8 +185,10 @@ struct i915_mmu_object;
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A,
-	DPLL_ID_PCH_PLL_B,
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
 };
 #define I915_NUM_PLLS 2
 
@@ -194,6 +197,7 @@ struct intel_dpll_hw_state {
 	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
+	uint32_t wrpll;
 };
 
 struct intel_shared_dpll {
@@ -204,6 +208,8 @@ struct intel_shared_dpll {
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
 	void (*mode_set)(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +234,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n);
 
-struct intel_ddi_plls {
-	int spll_refcount;
-	int wrpll1_refcount;
-	int wrpll2_refcount;
-};
-
 /* Interface history:
  *
  * 1.1: Original.
@@ -324,6 +324,7 @@ struct drm_i915_error_state {
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	struct drm_i915_error_object *semaphore_obj;
 
 	struct drm_i915_error_ring {
 		bool valid;
@@ -584,27 +585,48 @@ struct i915_ctx_hang_stats {
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ *	       context).
+ * @hang_stats: information about the role of this context in possible GPU
+ *		hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ *		   initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
 struct intel_context {
 	struct kref ref;
-	int id;
-	bool is_initialized;
+	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
 
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
+
 	struct list_head link;
 };
 
 struct i915_fbc {
 	unsigned long size;
+	unsigned threshold;
 	unsigned int fb_id;
 	enum plane plane;
 	int y;
 
-	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
 	struct intel_fbc_work {
@@ -880,6 +902,12 @@ struct vlv_s0ix_state {
 	u32 clock_gate_dis2;
 };
 
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
+};
+
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -904,12 +932,17 @@ struct intel_gen6_power_mgmt {
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
 	u8 rp0_freq;		/* Non-overclocked max frequency. */
 
+	u32 ei_interrupt_count;
+
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
+	/* manual wa residency calculations */
+	struct intel_rps_ei up_ei, down_ei;
+
 	/*
 	 * Protects RPS/RC6 register access and PCU communication.
 	 * Must be taken after struct_mutex if nested.
@@ -1374,6 +1407,7 @@ struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
@@ -1480,7 +1514,6 @@ struct drm_i915_private {
 
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-	struct intel_ddi_plls ddi_plls;
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	/* Reclocking support */
@@ -1557,6 +1590,11 @@ struct drm_i915_private {
 
 	struct i915_runtime_pm pm;
 
+	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+	u32 long_hpd_port_mask;
+	u32 short_hpd_port_mask;
+	struct work_struct dig_port_work;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -2097,12 +2135,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
 extern void i915_driver_lastclose(struct drm_device * dev);
 extern void i915_driver_preclose(struct drm_device *dev,
-				 struct drm_file *file_priv);
+				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
-				  struct drm_file *file_priv);
+				  struct drm_file *file);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
@ -2457,7 +2495,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
|
|||
|
||||
static inline bool i915_gem_context_is_default(const struct intel_context *c)
|
||||
{
|
||||
return c->id == DEFAULT_CONTEXT_ID;
|
||||
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
|
||||
}
|
||||
|
||||
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
|
||||
|
@@ -2488,7 +2526,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
@@ -2647,6 +2685,8 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
 extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);

drivers/gpu/drm/i915/i915_gem.c
@@ -1168,7 +1168,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
 	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
 		if (file_priv)
 			mod_delayed_work(dev_priv->wq,
@@ -2330,7 +2330,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring);
+	request_start = intel_ring_get_tail(ring->buffer);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2351,7 +2351,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request_ring_position = intel_ring_get_tail(ring);
+	request_ring_position = intel_ring_get_tail(ring->buffer);
 
 	ret = ring->add_request(ring);
 	if (ret)
@@ -2842,6 +2842,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	idx = intel_ring_sync_index(from, to);
 
 	seqno = obj->last_read_seqno;
+	/* Optimization: Avoid semaphore sync when we are sure we already
+	 * waited for an object with higher seqno */
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
 
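Note: the comment added above rests on a simple monotonicity argument. Seqnos increase per ring, and sync_seqno[idx] records the highest seqno of the source ring that the target ring has already waited on, so any object seqno at or below it needs no new semaphore. A minimal sketch of just that predicate (names illustrative, seqno wrap handling omitted):

	#include <stdbool.h>
	#include <stdint.h>

	/* True only when a new semaphore wait would add ordering that a
	 * previously recorded wait does not already guarantee. */
	static bool needs_semaphore_sync(uint32_t obj_seqno, uint32_t synced_seqno)
	{
		return obj_seqno > synced_seqno;
	}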

drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,14 +182,14 @@ void i915_gem_context_free(struct kref *ctx_ref)
 					   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->obj) {
+	if (ctx->legacy_hw_ctx.rcs_state) {
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->obj->base.dev))
+		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
 
 		/* XXX: Free up the object before tearing down the address space, in
 		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->obj->base);
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	}
 
 	if (ppgtt)
@@ -198,6 +198,36 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	kfree(ctx);
 }
 
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret)) {
+			drm_gem_object_unreference(&obj->base);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
 static struct i915_hw_ppgtt *
 create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
 {
@@ -234,40 +264,26 @@ __create_hw_context(struct drm_device *dev,
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	if (dev_priv->hw_context_size) {
-		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-		if (ctx->obj == NULL) {
-			ret = -ENOMEM;
+		struct drm_i915_gem_object *obj =
+				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		if (IS_ERR(obj)) {
+			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-
-		/*
-		 * Try to make the context utilize L3 as well as LLC.
-		 *
-		 * On VLV we don't have L3 controls in the PTEs so we
-		 * shouldn't touch the cache level, especially as that
-		 * would make the object snooped which might have a
-		 * negative performance impact.
-		 */
-		if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
-			ret = i915_gem_object_set_cache_level(ctx->obj,
-							      I915_CACHE_L3_LLC);
-			/* Failure shouldn't ever happen this early */
-			if (WARN_ON(ret))
-				goto err_out;
-		}
+		ctx->legacy_hw_ctx.rcs_state = obj;
 	}
 
 	/* Default context will never have a file_priv */
 	if (file_priv != NULL) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
 			goto err_out;
 	} else
-		ret = DEFAULT_CONTEXT_ID;
+		ret = DEFAULT_CONTEXT_HANDLE;
 
 	ctx->file_priv = file_priv;
-	ctx->id = ret;
+	ctx->user_handle = ret;
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
@@ -301,7 +317,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->obj) {
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -309,7 +325,7 @@ i915_gem_create_context(struct drm_device *dev,
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->obj,
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +365,8 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx && ctx->obj)
-		i915_gem_object_ggtt_unpin(ctx->obj);
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -366,23 +382,27 @@ void i915_gem_context_reset(struct drm_device *dev)
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 		struct intel_context *dctx = ring->default_context;
+		struct intel_context *lctx = ring->last_context;
 
 		/* Do a fake switch to the default context */
-		if (ring->last_context == dctx)
+		if (lctx == dctx)
 			continue;
 
-		if (!ring->last_context)
+		if (!lctx)
 			continue;
 
-		if (dctx->obj && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
-			dctx->obj->base.write_domain = 0;
-			dctx->obj->active = 0;
+			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+			dctx->legacy_hw_ctx.rcs_state->active = 0;
 		}
 
-		i915_gem_context_unreference(ring->last_context);
+		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+		i915_gem_context_unreference(lctx);
 		i915_gem_context_reference(dctx);
 		ring->last_context = dctx;
 	}
@@ -429,7 +449,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
 
-	if (dctx->obj) {
+	if (dctx->legacy_hw_ctx.rcs_state) {
 		/* The only known way to stop the gpu from accessing the hw context is
 		 * to reset it. Do this as the very last operation to avoid confusing
 		 * other code, leading to spurious errors. */
@@ -444,13 +464,13 @@ void i915_gem_context_fini(struct drm_device *dev)
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(dctx->obj->active);
-			i915_gem_object_ggtt_unpin(dctx->obj);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
 
-		i915_gem_object_ggtt_unpin(dctx->obj);
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +590,7 @@ mi_set_context(struct intel_engine_cs *ring,
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -602,8 +622,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->obj == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
 
 	if (from == to && !to->remap_slice)
@@ -611,7 +631,7 @@ static int do_switch(struct intel_engine_cs *ring,
 
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->obj,
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
@@ -644,17 +664,17 @@ static int do_switch(struct intel_engine_cs *ring,
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
 	if (ret)
 		goto unpin_out;
 
-	if (!to->obj->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
 							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
 	}
 
-	if (!to->is_initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
 
 	ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +700,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
|
@ -689,16 +709,16 @@ static int do_switch(struct intel_engine_cs *ring,
|
|||
* able to defer doing this until we know the object would be
|
||||
* swapped, but there is no way to do that yet.
|
||||
*/
|
||||
from->obj->dirty = 1;
|
||||
BUG_ON(from->obj->ring != ring);
|
||||
from->legacy_hw_ctx.rcs_state->dirty = 1;
|
||||
BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
|
||||
|
||||
/* obj is kept alive until the next request by its active ref */
|
||||
i915_gem_object_ggtt_unpin(from->obj);
|
||||
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
|
||||
i915_gem_context_unreference(from);
|
||||
}
|
||||
|
||||
uninitialized = !to->is_initialized && from == NULL;
|
||||
to->is_initialized = true;
|
||||
uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
|
||||
to->legacy_hw_ctx.initialized = true;
|
||||
|
||||
done:
|
||||
i915_gem_context_reference(to);
|
||||
|
@ -714,7 +734,7 @@ done:
|
|||
|
||||
unpin_out:
|
||||
if (ring->id == RCS)
|
||||
i915_gem_object_ggtt_unpin(to->obj);
|
||||
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -735,7 +755,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->obj == NULL) { /* We have the fake context */
+	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (to != ring->last_context) {
 			i915_gem_context_reference(to);
 			if (ring->last_context)
@@ -773,7 +793,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	args->ctx_id = ctx->id;
+	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
 	return 0;
@@ -787,7 +807,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	struct intel_context *ctx;
 	int ret;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_ID)
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 		return -ENOENT;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -800,7 +820,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 		return PTR_ERR(ctx);
 	}
 
-	idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
 

drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+			     struct intel_engine_cs *ring,
+			     struct intel_context *ctx,
+			     struct drm_i915_gem_execbuffer2 *args,
+			     struct list_head *vmas,
+			     struct drm_i915_gem_object *batch_obj,
+			     u64 exec_start, u32 flags)
+{
+	struct drm_clip_rect *cliprects = NULL;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 exec_len;
+	int instp_mode;
+	u32 instp_mask;
+	int i, ret = 0;
+
+	if (args->num_cliprects != 0) {
+		if (ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+			return -EINVAL;
+		}
+
+		if (INTEL_INFO(dev)->gen >= 5) {
+			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+			return -EINVAL;
+		}
+
+		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+			DRM_DEBUG("execbuf with %u cliprects\n",
+				  args->num_cliprects);
+			return -EINVAL;
+		}
+
+		cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*cliprects),
+				    GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		if (copy_from_user(cliprects,
+				   to_user_ptr(args->cliprects_ptr),
+				   sizeof(*cliprects)*args->num_cliprects)) {
+			ret = -EFAULT;
+			goto error;
+		}
+	} else {
+		if (args->DR4 == 0xffffffff) {
+			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+			args->DR4 = 0;
+		}
+
+		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+			return -EINVAL;
+		}
+	}
+
+	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	if (ret)
+		goto error;
+
+	ret = i915_switch_context(ring, ctx);
+	if (ret)
+		goto error;
+
+	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	instp_mask = I915_EXEC_CONSTANTS_MASK;
+	switch (instp_mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			ret = -EINVAL;
+			goto error;
+		}
+
+		if (instp_mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4) {
+				DRM_DEBUG("no rel constants on pre-gen4\n");
+				ret = -EINVAL;
+				goto error;
+			}
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+				ret = -EINVAL;
+				goto error;
+			}
+
+			/* The HW changed the meaning on this bit on gen6 */
+			if (INTEL_INFO(dev)->gen >= 6)
+				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (ring == &dev_priv->ring[RCS] &&
+	    instp_mode != dev_priv->relative_constants_mode) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			goto error;
+
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
+
+		dev_priv->relative_constants_mode = instp_mode;
+	}
+
+	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		if (ret)
+			goto error;
+	}
+
+	exec_len = args->batch_len;
+	if (cliprects) {
+		for (i = 0; i < args->num_cliprects; i++) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    args->DR1, args->DR4);
+			if (ret)
+				goto error;
+
+			ret = ring->dispatch_execbuffer(ring,
+							exec_start, exec_len,
+							flags);
+			if (ret)
+				goto error;
+		}
+	} else {
+		ret = ring->dispatch_execbuffer(ring,
+						exec_start, exec_len,
+						flags);
+		if (ret)
+			return ret;
+	}
+
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(vmas, ring);
+	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+	kfree(cliprects);
+	return ret;
+}
+
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
  * The Ring ID is returned.
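Note: the INSTPM write in legacy_ringbuffer_submission() above uses the masked-register convention common to i915 registers: the high 16 bits of the written value select which of the low 16 bits actually change, so unselected INSTPM bits keep their current hardware value. A small sketch of that encoding (the helper name is illustrative; the kernel open-codes "instp_mask << 16 | instp_mode"):

	#include <stdint.h>

	/* Build a masked-register payload: only bits set in `mask` are
	 * updated by the hardware; everything else is left untouched. */
	static uint32_t masked_reg_value(uint32_t mask, uint32_t value)
	{
		return (mask << 16) | (value & mask);
	}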
@@ -1085,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
-	struct drm_clip_rect *cliprects = NULL;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset, exec_len;
-	u32 mask, flags;
-	int ret, mode, i;
+	u64 exec_start = args->batch_start_offset;
+	u32 flags;
+	int ret;
 	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args))
|
@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
|
||||
mask = I915_EXEC_CONSTANTS_MASK;
|
||||
switch (mode) {
|
||||
case I915_EXEC_CONSTANTS_REL_GENERAL:
|
||||
case I915_EXEC_CONSTANTS_ABSOLUTE:
|
||||
case I915_EXEC_CONSTANTS_REL_SURFACE:
|
||||
if (mode != 0 && ring != &dev_priv->ring[RCS]) {
|
||||
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (mode != dev_priv->relative_constants_mode) {
|
||||
if (INTEL_INFO(dev)->gen < 4) {
|
||||
DRM_DEBUG("no rel constants on pre-gen4\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen > 5 &&
|
||||
mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
|
||||
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The HW changed the meaning on this bit on gen6 */
|
||||
if (INTEL_INFO(dev)->gen >= 6)
|
||||
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->buffer_count < 1) {
|
||||
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->num_cliprects != 0) {
|
||||
if (ring != &dev_priv->ring[RCS]) {
|
||||
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 5) {
|
||||
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
|
||||
DRM_DEBUG("execbuf with %u cliprects\n",
|
||||
args->num_cliprects);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cliprects = kcalloc(args->num_cliprects,
|
||||
sizeof(*cliprects),
|
||||
GFP_KERNEL);
|
||||
if (cliprects == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto pre_mutex_err;
|
||||
}
|
||||
|
||||
if (copy_from_user(cliprects,
|
||||
to_user_ptr(args->cliprects_ptr),
|
||||
sizeof(*cliprects)*args->num_cliprects)) {
|
||||
ret = -EFAULT;
|
||||
goto pre_mutex_err;
|
||||
}
|
||||
} else {
|
||||
if (args->DR4 == 0xffffffff) {
|
||||
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
|
||||
args->DR4 = 0;
|
||||
}
|
||||
|
||||
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
|
||||
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
|
@@ -1320,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+			args, &eb->vmas, batch_obj, exec_start, flags);
 	if (ret)
 		goto err;
 
-	ret = i915_switch_context(ring, ctx);
-	if (ret)
-		goto err;
-
-	if (ring == &dev_priv->ring[RCS] &&
-	    mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			goto err;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
-		intel_ring_emit(ring, mask << 16 | mode);
-		intel_ring_advance(ring);
-
-		dev_priv->relative_constants_mode = mode;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
-		if (ret)
-			goto err;
-	}
-
-
-	exec_len = args->batch_len;
-	if (cliprects) {
-		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(dev, &cliprects[i],
-					    args->DR1, args->DR4);
-			if (ret)
-				goto err;
-
-			ret = ring->dispatch_execbuffer(ring,
-							exec_start, exec_len,
-							flags);
-			if (ret)
-				goto err;
-		}
-	} else {
-		ret = ring->dispatch_execbuffer(ring,
-						exec_start, exec_len,
-						flags);
-		if (ret)
-			goto err;
-	}
-
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
-	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
@@ -1385,8 +1413,6 @@ err:
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
-	kfree(cliprects);
-
 	/* intel_gpu_busy should also get a ref, so it will free when the device
 	 * is really idle. */
 	intel_runtime_pm_put(dev_priv);

drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -103,30 +103,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	return base;
 }
 
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+	int compression_threshold = 1;
 	int ret;
 
-	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
-	if (!compressed_fb)
-		goto err_llb;
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds, therefore, in that case, we only have 1 resort.
+	 */
 
-	/* Try to over-allocate to reduce reallocations and fragmentation */
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
 				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-	if (ret)
-		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
-					 size >>= 1, 4096,
-					 DRM_MM_SEARCH_DEFAULT);
-	if (ret)
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+				 size >>= 1, 4096,
+				 DRM_MM_SEARCH_DEFAULT);
+	if (ret && INTEL_INFO(dev)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
 		goto err_llb;
+	else if (ret > 1) {
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+	}
+
+	dev_priv->fbc.threshold = ret;
 
 	if (HAS_PCH_SPLIT(dev))
-		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	} else {
 		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
 		if (!compressed_llb)
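Note: find_compression_threshold() above halves the CFB allocation and doubles the threshold on each failed stolen-memory allocation, giving up past 1:4 (and allowing only the full-size CFB for 16bpp framebuffers). A worked userspace sketch of that fallback, with illustrative sizes and without the gen <= 4 early-out:

	#include <stddef.h>

	static int pick_threshold(size_t size, size_t stolen_free, int fb_cpp)
	{
		int threshold = 1;

		size <<= 1;			/* over-allocate on the first try */
		if (size <= stolen_free)
			return threshold;

		for (;;) {
			/* HW can limit the CFB to 1:4 at most; at 16bpp only
			 * the full-size CFB is usable. */
			if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
				return 0;	/* no FBC */

			size >>= 1;
			if (size <= stolen_free)
				return threshold;

			threshold <<= 1;	/* halve the CFB again next round */
		}
	}

For example, an 8 MiB CFB request against 2 MiB of free stolen memory tries 16, 8 and 4 MiB before the 2 MiB allocation succeeds at threshold 4; i915_setup_compression() then records fbc.size = size / threshold, matching the diff above.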
@@ -140,13 +178,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 		dev_priv->fbc.compressed_llb = compressed_llb;
 
 		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + compressed_fb->start);
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
 		I915_WRITE(FBC_LL_BASE,
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->fbc.compressed_fb = compressed_fb;
-	dev_priv->fbc.size = size;
+	dev_priv->fbc.size = size / dev_priv->fbc.threshold;
 
 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
 		      size);
@@ -155,14 +192,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 
 err_fb:
 	kfree(compressed_llb);
-	drm_mm_remove_node(compressed_fb);
+	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 err_llb:
-	kfree(compressed_fb);
 	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
 	return -ENOSPC;
 }
 
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@ -175,7 +211,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
|
|||
/* Release any current block */
|
||||
i915_gem_stolen_cleanup_compression(dev);
|
||||
|
||||
return i915_setup_compression(dev, size);
|
||||
return i915_setup_compression(dev, size, fb_cpp);
|
||||
}
|
||||
|
||||
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
|
||||
|
@ -185,10 +221,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
|
|||
if (dev_priv->fbc.size == 0)
|
||||
return;
|
||||
|
||||
if (dev_priv->fbc.compressed_fb) {
|
||||
drm_mm_remove_node(dev_priv->fbc.compressed_fb);
|
||||
kfree(dev_priv->fbc.compressed_fb);
|
||||
}
|
||||
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
|
||||
|
||||
if (dev_priv->fbc.compressed_llb) {
|
||||
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
|
||||
|
|
|
@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        struct drm_device *dev = error_priv->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error = error_priv->error;
        struct drm_i915_error_object *obj;
        int i, j, offset, elt;
        int max_hangcheck_score;

@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                   error->pinned_bo_count[0]);

        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                struct drm_i915_error_object *obj;

                obj = error->ring[i].batchbuffer;
                if (obj) {
                        err_puts(m, dev_priv->ring[i].name);

@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                }
        }

        if ((obj = error->semaphore_obj)) {
                err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
                for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
                        err_printf(m, "[%04x] %08x %08x %08x %08x\n",
                                   elt * 4,
                                   obj->pages[0][elt],
                                   obj->pages[0][elt+1],
                                   obj->pages[0][elt+2],
                                   obj->pages[0][elt+3]);
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
                kfree(error->ring[i].requests);
        }

        i915_error_object_free(error->semaphore_obj);
        kfree(error->active_bo);
        kfree(error->overlay);
        kfree(error->display);

@@ -746,7 +758,52 @@ static void i915_gem_record_fences(struct drm_device *dev,
        }
}


static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
                                        struct drm_i915_error_state *error,
                                        struct intel_engine_cs *ring,
                                        struct drm_i915_error_ring *ering)
{
        struct intel_engine_cs *useless;
        int i;

        if (!i915_semaphore_is_enabled(dev_priv->dev))
                return;

        if (!error->semaphore_obj)
                error->semaphore_obj =
                        i915_error_object_create(dev_priv,
                                                 dev_priv->semaphore_obj,
                                                 &dev_priv->gtt.base);

        for_each_ring(useless, dev_priv, i) {
                u16 signal_offset =
                        (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
                u32 *tmp = error->semaphore_obj->pages[0];

                ering->semaphore_mboxes[i] = tmp[signal_offset];
                ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i];
        }
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
                                        struct intel_engine_cs *ring,
                                        struct drm_i915_error_ring *ering)
{
        ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
        ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
        ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
        ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

        if (HAS_VEBOX(dev_priv->dev)) {
                ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(ring->mmio_base));
                ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
        }
}

static void i915_record_ring_state(struct drm_device *dev,
                                   struct drm_i915_error_state *error,
                                   struct intel_engine_cs *ring,
                                   struct drm_i915_error_ring *ering)
{

@@ -755,18 +812,10 @@ static void i915_record_ring_state(struct drm_device *dev,
        if (INTEL_INFO(dev)->gen >= 6) {
                ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
                ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
                ering->semaphore_mboxes[0]
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
                ering->semaphore_mboxes[1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
                ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
                ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
        }

        if (HAS_VEBOX(dev)) {
                ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(ring->mmio_base));
                ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
                if (INTEL_INFO(dev)->gen >= 8)
                        gen8_record_semaphore_state(dev_priv, error, ring, ering);
                else
                        gen6_record_semaphore_state(dev_priv, ring, ering);
        }

        if (INTEL_INFO(dev)->gen >= 4) {

@@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev,

                error->ring[i].valid = true;

                i915_record_ring_state(dev, ring, &error->ring[i]);
                i915_record_ring_state(dev, error, ring, &error->ring[i]);

                request = i915_gem_find_active_request(ring);
                if (request) {

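For reference, the new error-state dump above walks the semaphore page in rows of four dwords; elt * 4 is the byte offset of the row and PAGE_SIZE/16 bounds the walk to the first 1KiB of the 4KiB page. The same arithmetic outside the driver, as a small sketch:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SIZE 4096

        /* Dump the first PAGE_SIZE/16 dwords of a page, four per row,
         * labelling each row with its byte offset -- the same layout the
         * error-state code uses for the semaphore page. */
        static void dump_semaphore_page(const uint32_t *page)
        {
                for (int elt = 0; elt < PAGE_SIZE / 16; elt += 4)
                        printf("[%04x] %08x %08x %08x %08x\n",
                               elt * 4, page[elt], page[elt + 1],
                               page[elt + 2], page[elt + 3]);
        }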
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
        return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, dig_port_work);
        unsigned long irqflags;
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i, ret;
        u32 old_bits = 0;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        long_port_mask = dev_priv->long_hpd_port_mask;
        dev_priv->long_hpd_port_mask = 0;
        short_port_mask = dev_priv->short_hpd_port_mask;
        dev_priv->short_hpd_port_mask = 0;
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
                bool long_hpd = false;
                intel_dig_port = dev_priv->hpd_irq_port[i];
                if (!intel_dig_port || !intel_dig_port->hpd_pulse)
                        continue;

                if (long_port_mask & (1 << i)) {
                        valid = true;
                        long_hpd = true;
                } else if (short_port_mask & (1 << i))
                        valid = true;

                if (valid) {
                        ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
                        if (ret == true) {
                                /* if we get true fallback to old school hpd */
                                old_bits |= (1 << intel_dig_port->base.hpd_pin);
                        }
                }
        }

        if (old_bits) {
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                dev_priv->hpd_event_bits |= old_bits;
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
                schedule_work(&dev_priv->hotplug_work);
        }
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
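i915_digport_work_func() above uses a common deferred-IRQ pattern: the interrupt handler accumulates per-port bits under irq_lock, and the work function snapshots and clears both masks in one critical section so pulses that arrive while the work runs are not lost. The shape of that pattern, sketched with a hypothetical pending mask (userspace locking stands in for the spinlock):

        #include <pthread.h>
        #include <stdint.h>

        static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
        static uint32_t pending_mask;   /* set by the "interrupt" side */

        /* Worker side: take a snapshot and clear the shared mask atomically,
         * then process the snapshot outside the lock. */
        static void process_pending(void (*handle)(int port))
        {
                uint32_t snapshot;

                pthread_mutex_lock(&irq_lock);
                snapshot = pending_mask;
                pending_mask = 0;
                pthread_mutex_unlock(&irq_lock);

                for (int port = 0; port < 32; port++)
                        if (snapshot & (1u << port))
                                handle(port);
        }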
@@ -1221,6 +1268,131 @@ static void notify_ring(struct drm_device *dev,
        i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
                            struct intel_rps_ei *rps_ei)
{
        u32 cz_ts, cz_freq_khz;
        u32 render_count, media_count;
        u32 elapsed_render, elapsed_media, elapsed_time;
        u32 residency = 0;

        cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

        render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
        media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

        if (rps_ei->cz_clock == 0) {
                rps_ei->cz_clock = cz_ts;
                rps_ei->render_c0 = render_count;
                rps_ei->media_c0 = media_count;

                return dev_priv->rps.cur_freq;
        }

        elapsed_time = cz_ts - rps_ei->cz_clock;
        rps_ei->cz_clock = cz_ts;

        elapsed_render = render_count - rps_ei->render_c0;
        rps_ei->render_c0 = render_count;

        elapsed_media = media_count - rps_ei->media_c0;
        rps_ei->media_c0 = media_count;

        /* Convert all the counters into common unit of milli sec */
        elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
        elapsed_render /= cz_freq_khz;
        elapsed_media /= cz_freq_khz;

        /*
         * Calculate overall C0 residency percentage
         * only if elapsed time is non zero
         */
        if (elapsed_time) {
                residency =
                        ((max(elapsed_render, elapsed_media) * 100)
                         / elapsed_time);
        }

        return residency;
}

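vlv_c0_residency() above reports busyness as a percentage: it takes the larger of the render and media C0 counter deltas over the wall-clock delta, after converting all three to comparable millisecond units. The core percentage computation in isolation, as a sketch:

        #include <stdint.h>

        /* Busyness percentage for one evaluation interval, assuming the
         * three deltas have already been converted to the same unit
         * (milliseconds), as the driver does before this point. */
        static uint32_t c0_residency_pct(uint32_t elapsed_render_ms,
                                         uint32_t elapsed_media_ms,
                                         uint32_t elapsed_time_ms)
        {
                uint32_t busiest = elapsed_render_ms > elapsed_media_ms ?
                                   elapsed_render_ms : elapsed_media_ms;

                return elapsed_time_ms ? busiest * 100 / elapsed_time_ms : 0;
        }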
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
        u32 residency_C0_up = 0, residency_C0_down = 0;
        u8 new_delay, adj;

        dev_priv->rps.ei_interrupt_count++;

        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));


        if (dev_priv->rps.up_ei.cz_clock == 0) {
                vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
                vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
                return dev_priv->rps.cur_freq;
        }


        /*
         * To down throttle, C0 residency should be less than down threshold
         * for continuous EI intervals. So calculate down EI counters
         * once in VLV_INT_COUNT_FOR_DOWN_EI
         */
        if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

                dev_priv->rps.ei_interrupt_count = 0;

                residency_C0_down = vlv_c0_residency(dev_priv,
                                                     &dev_priv->rps.down_ei);
        } else {
                residency_C0_up = vlv_c0_residency(dev_priv,
                                                   &dev_priv->rps.up_ei);
        }

        new_delay = dev_priv->rps.cur_freq;

        adj = dev_priv->rps.last_adj;
        /* C0 residency is greater than UP threshold. Increase Frequency */
        if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else
                        adj = 1;

                if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;

        } else if (!dev_priv->rps.ei_interrupt_count &&
                   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
                /*
                 * This means, C0 residency is less than down threshold over
                 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
                 */
                if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;
        }

        return new_delay;
}

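Like the existing gen6 path, the vlv C0 logic ramps the frequency step multiplicatively: each consecutive interval that crosses the same threshold doubles last_adj, so sustained load converges on the soft limit quickly while a single noisy interval only moves one step. The decision step in isolation (a sketch; clamping to the soft limits is left to the caller, as in the driver):

        /* One RPS decision step: adj carries the previous step (signed).
         * Consecutive moves in the same direction double it. */
        static int next_adj(int last_adj, int direction /* +1 up, -1 down */)
        {
                if (direction > 0)
                        return last_adj > 0 ? last_adj * 2 : 1;
                else
                        return last_adj < 0 ? last_adj * 2 : -1;
        }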
static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =

@@ -1269,6 +1441,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;

@@ -1517,23 +1691,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int ilk_port_to_hotplug_shift(enum port port)
{
        switch (port) {
        case PORT_A:
        case PORT_E:
        default:
                return -1;
        case PORT_B:
                return 0;
        case PORT_C:
                return 8;
        case PORT_D:
                return 16;
        }
}

static int g4x_port_to_hotplug_shift(enum port port)
{
        switch (port) {
        case PORT_A:
        case PORT_E:
        default:
                return -1;
        case PORT_B:
                return 17;
        case PORT_C:
                return 19;
        case PORT_D:
                return 21;
        }
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
        switch (pin) {
        case HPD_PORT_B:
                return PORT_B;
        case HPD_PORT_C:
                return PORT_C;
        case HPD_PORT_D:
                return PORT_D;
        default:
                return PORT_A; /* no hpd */
        }
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         u32 dig_hotplug_reg,
                                         const u32 *hpd)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
        enum port port;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        u32 dig_shift;
        u32 dig_port_mask = 0;

        if (!hotplug_trigger)
                return;

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                         hotplug_trigger);
        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
                         hotplug_trigger, dig_hotplug_reg);

        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
                if (!(hpd[i] & hotplug_trigger))
                        continue;

                port = get_port_from_pin(i);
                if (port && dev_priv->hpd_irq_port[port]) {
                        bool long_hpd;

                        if (IS_G4X(dev)) {
                                dig_shift = g4x_port_to_hotplug_shift(port);
                                long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
                        } else {
                                dig_shift = ilk_port_to_hotplug_shift(port);
                                long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
                        }

                        DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
                        /* for long HPD pulses we want to have the digital queue happen,
                           but we still want HPD storm detection to function. */
                        if (long_hpd) {
                                dev_priv->long_hpd_port_mask |= (1 << port);
                                dig_port_mask |= hpd[i];
                        } else {
                                /* for short HPD just trigger the digital queue */
                                dev_priv->short_hpd_port_mask |= (1 << port);
                                hotplug_trigger &= ~hpd[i];
                        }
                        queue_dig = true;
                }
        }

        for (i = 1; i < HPD_NUM_PINS; i++) {
                if (hpd[i] & hotplug_trigger &&
                    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
                        /*

@@ -1553,7 +1808,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!(dig_port_mask & hpd[i])) {
                        dev_priv->hpd_event_bits |= (1 << i);
                        queue_hp = true;
                }

                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {

@@ -1582,7 +1841,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        schedule_work(&dev_priv->hotplug_work);
        if (queue_dig)
                schedule_work(&dev_priv->dig_port_work);
        if (queue_hp)
                schedule_work(&dev_priv->hotplug_work);
}

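intel_hpd_irq_handler() now distinguishes long from short pulses by shifting the per-port status field down and testing the LONG_DETECT bit; on g4x the field lives in the trigger register itself, on PCH platforms in the latched dig_hotplug_reg. A sketch of the decode, assuming a per-port field whose long-pulse bit sits at bit 1 (the exact layout is platform-specific, per the shift tables above):

        #include <stdbool.h>
        #include <stdint.h>

        #define HOTPLUG_LONG_DETECT (1u << 1)   /* assumed per-port LONG bit */

        /* Each port owns a small field inside the status register; 'shift'
         * comes from a per-platform table (cf. g4x_/ilk_port_to_hotplug_shift).
         * A set LONG_DETECT bit means a long pulse (plug/unplug), a clear
         * one a short pulse (link event). */
        static bool hpd_pulse_is_long(uint32_t status, int shift)
        {
                return (status >> shift) & HOTPLUG_LONG_DETECT;
        }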
static void gmbus_irq_handler(struct drm_device *dev)

@@ -1823,11 +2085,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
        if (IS_G4X(dev)) {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

                intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
                intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
        } else {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
                intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
        }

        if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&

@@ -1925,8 +2187,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
        u32 dig_hotplug_reg;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

        intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>

@@ -2032,8 +2298,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
        u32 dig_hotplug_reg;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

        intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>

@@ -2780,12 +3050,7 @@ static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
        if (INTEL_INFO(dev)->gen >= 8) {
                /*
                 * FIXME: gen8 semaphore support - currently we don't emit
                 * semaphores on bdw anyway, but this needs to be addressed when
                 * we merge that code.
                 */
                return false;
                return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
                return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |

@@ -2794,19 +3059,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        int i;

        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
                /*
                 * FIXME: gen8 semaphore support - currently we don't emit
                 * semaphores on bdw anyway, but this needs to be addressed when
                 * we merge that code.
                 */
                return NULL;
                for_each_ring(signaller, dev_priv, i) {
                        if (ring == signaller)
                                continue;

                        if (offset == signaller->semaphore.signal_ggtt[ring->id])
                                return signaller;
                }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

@@ -2819,8 +3085,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
                }
        }

        DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
                  ring->id, ipehr);
        DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
                  ring->id, ipehr, offset);

        return NULL;
}

@@ -2830,7 +3096,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 cmd, ipehr, head;
        int i;
        u64 offset = 0;
        int i, backwards;

        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if (!ipehr_is_semaphore_wait(ring->dev, ipehr))

@@ -2839,13 +3106,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
        /*
         * HEAD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX. But limit it to just 3
         * dwords. Note that we don't care about ACTHD here since that might
         * or 4 dwords depending on the semaphore wait command size.
         * Note that we don't care about ACTHD here since that might
         * point at a batch, and semaphores are always emitted into the
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
        backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

        for (i = 4; i; --i) {
        for (i = backwards; i; --i) {
                /*
                 * Be paranoid and presume the hw has gone off into the wild -
                 * our ring is smaller than what the hardware (and hence

@@ -2865,7 +3134,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
                return NULL;

        *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
        return semaphore_wait_to_signaller_ring(ring, ipehr);
        if (INTEL_INFO(ring->dev)->gen >= 8) {
                offset = ioread32(ring->buffer->virtual_start + head + 12);
                offset <<= 32;
                offset = ioread32(ring->buffer->virtual_start + head + 8);
        }
        return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

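On gen8 the MI_SEMAPHORE_WAIT command is one dword longer than the gen6 MI_SEMAPHORE_MBOX wait, hence the scan depth of 5 vs 4 above, and the semaphore address is a 64-bit GGTT offset split across dwords 2 and 3 of the command. Reassembling such a split address, as a sketch (note that the hunk above stores the low half with plain assignment rather than OR, which reads like a quirk preserved from the original commit; the sketch shows the combining form):

        #include <stdint.h>

        /* Reassemble a 64-bit address from the two dwords of a gen8
         * MI_SEMAPHORE_WAIT: dword 2 holds the low 32 bits, dword 3 the
         * high 32 bits. cmd points at the first dword of the command. */
        static uint64_t semaphore_wait_address(const uint32_t *cmd)
        {
                uint64_t offset = (uint64_t)cmd[3] << 32;

                return offset | cmd[2];
        }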
static int semaphore_passed(struct intel_engine_cs *ring)

@@ -4354,12 +4628,17 @@ void intel_irq_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

        /* Let's track the enabled rps events */
        dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
        if (IS_VALLEYVIEW(dev))
                /* WaGsvRC0ResidenncyMethod:VLV */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,

@@ -240,7 +240,7 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)

@@ -266,6 +266,11 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)

@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)

@@ -525,6 +531,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce

#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */

@@ -550,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000

#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
#define VLV_INT_COUNT_FOR_DOWN_EI 5

/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3

@@ -584,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)

/**
 * DOC: DPIO

@@ -5383,6 +5400,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2

@@ -5530,6 +5548,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108

@@ -5538,6 +5558,8 @@ enum punit_power_well {

#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
#define VLV_RENDER_C0_COUNT_REG 0x138118
#define VLV_MEDIA_C0_COUNT_REG 0x13811C

#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)

@@ -5772,6 +5794,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)

@@ -5899,10 +5922,12 @@ enum punit_power_well {
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
#define WRPLL_PLL_SSC (1<<28)
#define WRPLL_PLL_NON_SSC (2<<28)
#define WRPLL_PLL_LCPLL (3<<28)
#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff)

@@ -5921,6 +5946,7 @@ enum punit_power_well {
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
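The new PORT_CLK_SEL_WRPLL(pll) macro folds the two WRPLL selector encodings into one expression: shared-DPLL ids 0 and 1 map onto the existing WRPLL1/WRPLL2 values. A quick userspace check of that identity (u-suffixes added here only to keep the shifts well-defined outside the kernel's u32 context):

        #include <assert.h>

        #define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4u) << 29)
        #define PORT_CLK_SEL_WRPLL1     (4u << 29)
        #define PORT_CLK_SEL_WRPLL2     (5u << 29)

        int main(void)
        {
                /* pll id 0 -> WRPLL1, pll id 1 -> WRPLL2, as the macro claims */
                assert(PORT_CLK_SEL_WRPLL(0) == PORT_CLK_SEL_WRPLL1);
                assert(PORT_CLK_SEL_WRPLL(1) == PORT_CLK_SEL_WRPLL2);
                return 0;
        }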
@@ -5962,7 +5988,10 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)

#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
 * since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)

@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
        pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}

static void hsw_crt_pre_enable(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
        I915_WRITE(SPLL_CTL,
                   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
        POSTING_READ(SPLL_CTL);
        udelay(20);
}

/* Note: The caller is required to filter out dpms modes not supported by the
 * platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)

@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
        intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}


static void hsw_crt_post_disable(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val;

        DRM_DEBUG_KMS("Disabling SPLL\n");
        val = I915_READ(SPLL_CTL);
        WARN_ON(!(val & SPLL_PLL_ENABLE));
        I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
        POSTING_READ(SPLL_CTL);
}

static void intel_enable_crt(struct intel_encoder *encoder)
{
        struct intel_crt *crt = intel_encoder_to_crt(encoder);

@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
        pipe_config->pipe_bpp = 24;

        /* FDI must always be 2.7 GHz */
        if (HAS_DDI(dev))
        if (HAS_DDI(dev)) {
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
                pipe_config->port_clock = 135000 * 2;
        }

        return true;
}

@@ -860,6 +888,8 @@ void intel_crt_init(struct drm_device *dev)
        if (HAS_DDI(dev)) {
                crt->base.get_config = hsw_crt_get_config;
                crt->base.get_hw_state = intel_ddi_get_hw_state;
                crt->base.pre_enable = hsw_crt_pre_enable;
                crt->base.post_disable = hsw_crt_post_disable;
        } else {
                crt->base.get_config = intel_crt_get_config;
                crt->base.get_hw_state = intel_crt_get_hw_state;

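hsw_crt_pre_enable()/hsw_crt_post_disable() above move SPLL on/off handling into the encoder hooks; the enable write is followed by a posting read to flush it and a 20µs delay for the PLL to lock before the port is brought up. The same ordering as a sketch, with hypothetical MMIO helpers standing in for I915_WRITE/I915_READ:

        #include <stdint.h>

        void mmio_write(uint32_t reg, uint32_t val);     /* hypothetical */
        uint32_t mmio_read(uint32_t reg);                /* hypothetical */
        void udelay(unsigned int usecs);                 /* hypothetical */

        #define SPLL_CTL        0x46020         /* offset shown for illustration */
        #define SPLL_PLL_ENABLE (1u << 31)

        static void spll_enable(uint32_t freq_bits)
        {
                mmio_write(SPLL_CTL, SPLL_PLL_ENABLE | freq_bits);
                (void)mmio_read(SPLL_CTL);      /* posting read: flush the write */
                udelay(20);                     /* wait for the PLL to lock */
        }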
@@ -277,7 +277,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);

        /* Configure Port Clock Select */
        I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
        I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
        WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);

        /* Start the training iterating through available voltages and emphasis,
         * testing each value twice. */

@@ -385,53 +386,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
        return ret;
}

void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
        struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t val;

        switch (intel_crtc->ddi_pll_sel) {
        case PORT_CLK_SEL_SPLL:
                plls->spll_refcount--;
                if (plls->spll_refcount == 0) {
                        DRM_DEBUG_KMS("Disabling SPLL\n");
                        val = I915_READ(SPLL_CTL);
                        WARN_ON(!(val & SPLL_PLL_ENABLE));
                        I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
                        POSTING_READ(SPLL_CTL);
                }
                break;
        case PORT_CLK_SEL_WRPLL1:
                plls->wrpll1_refcount--;
                if (plls->wrpll1_refcount == 0) {
                        DRM_DEBUG_KMS("Disabling WRPLL 1\n");
                        val = I915_READ(WRPLL_CTL1);
                        WARN_ON(!(val & WRPLL_PLL_ENABLE));
                        I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
                        POSTING_READ(WRPLL_CTL1);
                }
                break;
        case PORT_CLK_SEL_WRPLL2:
                plls->wrpll2_refcount--;
                if (plls->wrpll2_refcount == 0) {
                        DRM_DEBUG_KMS("Disabling WRPLL 2\n");
                        val = I915_READ(WRPLL_CTL2);
                        WARN_ON(!(val & WRPLL_PLL_ENABLE));
                        I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
                        POSTING_READ(WRPLL_CTL2);
                }
                break;
        }

        WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
        WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
        WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");

        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}

#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)

@@ -592,9 +546,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
        u32 wrpll;

        wrpll = I915_READ(reg);
        switch (wrpll & SPLL_PLL_REF_MASK) {
        case SPLL_PLL_SSC:
        case SPLL_PLL_NON_SSC:
        switch (wrpll & WRPLL_PLL_REF_MASK) {
        case WRPLL_PLL_SSC:
        case WRPLL_PLL_NON_SSC:
                /*
                 * We could calculate spread here, but our checking
                 * code only cares about 5% accuracy, and spread is a max of

@@ -602,7 +556,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
                 */
                refclk = 135;
                break;
        case SPLL_PLL_LCPLL:
        case WRPLL_PLL_LCPLL:
                refclk = LC_FREQ;
                break;
        default:

@@ -622,11 +576,10 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        enum port port = intel_ddi_get_encoder_port(encoder);
        int link_clock = 0;
        u32 val, pll;

        val = I915_READ(PORT_CLK_SEL(port));
        val = pipe_config->ddi_pll_sel;
        switch (val & PORT_CLK_SEL_MASK) {
        case PORT_CLK_SEL_LCPLL_810:
                link_clock = 81000;

@@ -750,173 +703,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;
        struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
        int type = intel_encoder->type;
        enum pipe pipe = intel_crtc->pipe;
        int clock = intel_crtc->config.port_clock;

        intel_ddi_put_crtc_pll(crtc);
        intel_put_shared_dpll(intel_crtc);

        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                switch (intel_dp->link_bw) {
                case DP_LINK_BW_1_62:
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
                        break;
                case DP_LINK_BW_2_7:
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
                        break;
                case DP_LINK_BW_5_4:
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
                        break;
                default:
                        DRM_ERROR("Link bandwidth %d unsupported\n",
                                  intel_dp->link_bw);
                        return false;
                }

        } else if (type == INTEL_OUTPUT_HDMI) {
                uint32_t reg, val;
        if (type == INTEL_OUTPUT_HDMI) {
                struct intel_shared_dpll *pll;
                uint32_t val;
                unsigned p, n2, r2;

                intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);

                val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
                val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
                      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
                      WRPLL_DIVIDER_POST(p);

                if (val == I915_READ(WRPLL_CTL1)) {
                        DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
                                      pipe_name(pipe));
                        reg = WRPLL_CTL1;
                } else if (val == I915_READ(WRPLL_CTL2)) {
                        DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
                                      pipe_name(pipe));
                        reg = WRPLL_CTL2;
                } else if (plls->wrpll1_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
                                      pipe_name(pipe));
                        reg = WRPLL_CTL1;
                } else if (plls->wrpll2_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
                                      pipe_name(pipe));
                        reg = WRPLL_CTL2;
                } else {
                        DRM_ERROR("No WRPLLs available!\n");
                intel_crtc->config.dpll_hw_state.wrpll = val;

                pll = intel_get_shared_dpll(intel_crtc);
                if (pll == NULL) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
                                         pipe_name(intel_crtc->pipe));
                        return false;
                }

                DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
                              clock, p, n2, r2);

                if (reg == WRPLL_CTL1) {
                        plls->wrpll1_refcount++;
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
                } else {
                        plls->wrpll2_refcount++;
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
                }

        } else if (type == INTEL_OUTPUT_ANALOG) {
                if (plls->spll_refcount == 0) {
                        DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
                                      pipe_name(pipe));
                        plls->spll_refcount++;
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
                } else {
                        DRM_ERROR("SPLL already in use\n");
                        return false;
                }

        } else {
                WARN(1, "Invalid DDI encoder type %d\n", type);
                return false;
                intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
        }

        return true;
}

/*
 * To be called after intel_ddi_pll_select(). That one selects the PLL to be
 * used, this one actually enables the PLL.
 */
void intel_ddi_pll_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
        int clock = crtc->config.port_clock;
        uint32_t reg, cur_val, new_val;
        int refcount;
        const char *pll_name;
        uint32_t enable_bit = (1 << 31);
        unsigned int p, n2, r2;

        BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
        BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);

        switch (crtc->ddi_pll_sel) {
        case PORT_CLK_SEL_LCPLL_2700:
        case PORT_CLK_SEL_LCPLL_1350:
        case PORT_CLK_SEL_LCPLL_810:
                /*
                 * LCPLL should always be enabled at this point of the mode set
                 * sequence, so nothing to do.
                 */
                return;

        case PORT_CLK_SEL_SPLL:
                pll_name = "SPLL";
                reg = SPLL_CTL;
                refcount = plls->spll_refcount;
                new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
                          SPLL_PLL_SSC;
                break;

        case PORT_CLK_SEL_WRPLL1:
        case PORT_CLK_SEL_WRPLL2:
                if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
                        pll_name = "WRPLL1";
                        reg = WRPLL_CTL1;
                        refcount = plls->wrpll1_refcount;
                } else {
                        pll_name = "WRPLL2";
                        reg = WRPLL_CTL2;
                        refcount = plls->wrpll2_refcount;
                }

                intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);

                new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
                          WRPLL_DIVIDER_REFERENCE(r2) |
                          WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);

                break;

        case PORT_CLK_SEL_NONE:
                WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
                return;
        default:
                WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
                return;
        }

        cur_val = I915_READ(reg);

        WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
        if (refcount == 1) {
                WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
                I915_WRITE(reg, new_val);
                POSTING_READ(reg);
                udelay(20);
        } else {
                WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
        }
}

void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;

@@ -995,7 +812,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
         * eDP when not using the panel fitter, and when not
         * using motion blur mitigation (which we don't
         * support). */
        if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
        if (IS_HASWELL(dev) &&
            (intel_crtc->config.pch_pfit.enabled ||
             intel_crtc->config.pch_pfit.force_thru))
                temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
        else
                temp |= TRANS_DDI_EDP_INPUT_A_ON;

@@ -1146,76 +965,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        return false;
}

static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
                                       enum pipe pipe)
{
        uint32_t temp, ret;
        enum port port = I915_MAX_PORTS;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        int i;

        if (cpu_transcoder == TRANSCODER_EDP) {
                port = PORT_A;
        } else {
                temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                temp &= TRANS_DDI_PORT_MASK;

                for (i = PORT_B; i <= PORT_E; i++)
                        if (temp == TRANS_DDI_SELECT_PORT(i))
                                port = i;
        }

        if (port == I915_MAX_PORTS) {
                WARN(1, "Pipe %c enabled on an unknown port\n",
                     pipe_name(pipe));
                ret = PORT_CLK_SEL_NONE;
        } else {
                ret = I915_READ(PORT_CLK_SEL(port));
                DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
                              "0x%08x\n", pipe_name(pipe), port_name(port),
                              ret);
        }

        return ret;
}

void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *intel_crtc;

        dev_priv->ddi_plls.spll_refcount = 0;
        dev_priv->ddi_plls.wrpll1_refcount = 0;
        dev_priv->ddi_plls.wrpll2_refcount = 0;

        for_each_pipe(pipe) {
                intel_crtc =
                        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (!intel_crtc->active) {
                        intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
                        continue;
                }

                intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
                                                                 pipe);

                switch (intel_crtc->ddi_pll_sel) {
                case PORT_CLK_SEL_SPLL:
                        dev_priv->ddi_plls.spll_refcount++;
                        break;
                case PORT_CLK_SEL_WRPLL1:
                        dev_priv->ddi_plls.wrpll1_refcount++;
                        break;
                case PORT_CLK_SEL_WRPLL2:
                        dev_priv->ddi_plls.wrpll2_refcount++;
                        break;
                }
        }
}

void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
        struct drm_crtc *crtc = &intel_crtc->base;

@@ -1261,8 +1010,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
                intel_edp_panel_on(intel_dp);
        }

        WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
        I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
        WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
        I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);

        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

@@ -1418,10 +1167,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
        }
}

static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
                               struct intel_shared_dpll *pll)
{
        I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
        POSTING_READ(WRPLL_CTL(pll->id));
        udelay(20);
}

static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
{
        uint32_t val;

        val = I915_READ(WRPLL_CTL(pll->id));
        I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
        POSTING_READ(WRPLL_CTL(pll->id));
}

static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
                                     struct intel_shared_dpll *pll,
                                     struct intel_dpll_hw_state *hw_state)
{
        uint32_t val;

        if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;

        val = I915_READ(WRPLL_CTL(pll->id));
        hw_state->wrpll = val;

        return val & WRPLL_PLL_ENABLE;
}

static char *hsw_ddi_pll_names[] = {
        "WRPLL 1",
        "WRPLL 2",
};

void intel_ddi_pll_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val = I915_READ(LCPLL_CTL);
        int i;

        dev_priv->num_shared_dpll = 2;

        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                dev_priv->shared_dplls[i].id = i;
                dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
                dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
                dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
                dev_priv->shared_dplls[i].get_hw_state =
                        hsw_ddi_pll_get_hw_state;
        }

        /* The LCPLL register should be turned on by the BIOS. For now let's
         * just check its state and print errors in case something is wrong.

@@ -1705,6 +1504,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->cloneable = 0;
        intel_encoder->hot_plug = intel_ddi_hot_plug;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
        dev_priv->hpd_irq_port[port] = intel_dig_port;

        if (init_dp)
                dp_connector = intel_ddi_init_dp_connector(intel_dig_port);

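The WRPLLs above become plain shared-DPLL objects: enable/disable/get_hw_state hooks plus a precomputed hw_state.wrpll value replace the hand-rolled refcounting that intel_ddi_put_crtc_pll() used to do. The shape of that ops table, as a simplified sketch (names and fields abbreviated; not the driver's actual struct):

        #include <stdbool.h>
        #include <stdint.h>

        /* Simplified shared-DPLL descriptor mirroring the hooks installed
         * for WRPLL 1/2 in intel_ddi_pll_init() above. */
        struct shared_dpll {
                int id;
                const char *name;
                int refcount;                   /* managed by the core */
                uint32_t wrpll_state;           /* precomputed WRPLL_CTL value */
                void (*enable)(struct shared_dpll *pll);
                void (*disable)(struct shared_dpll *pll);
                bool (*get_hw_state)(struct shared_dpll *pll, uint32_t *state);
        };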
File diff suppressed because it is too large
@ -745,6 +745,22 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
|
|||
intel_connector_unregister(intel_connector);
|
||||
}
|
||||
|
||||
static void
|
||||
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
|
||||
{
|
||||
switch (link_bw) {
|
||||
case DP_LINK_BW_1_62:
|
||||
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
|
||||
break;
|
||||
case DP_LINK_BW_2_7:
|
||||
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
|
||||
break;
|
||||
case DP_LINK_BW_5_4:
|
||||
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
intel_dp_set_clock(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config, int link_bw)
|
||||
|
@ -756,8 +772,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
|
|||
if (IS_G4X(dev)) {
|
||||
divisor = gen4_dpll;
|
||||
count = ARRAY_SIZE(gen4_dpll);
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
/* Haswell has special-purpose DP DDI clocks. */
|
||||
} else if (HAS_PCH_SPLIT(dev)) {
|
||||
divisor = pch_dpll;
|
||||
count = ARRAY_SIZE(pch_dpll);
|
||||
|
@ -928,7 +942,10 @@ found:
|
|||
&pipe_config->dp_m2_n2);
|
||||
}
|
||||
|
||||
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
|
||||
if (HAS_DDI(dev))
|
||||
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
|
||||
else
|
||||
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -1316,8 +1333,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
|
|||
|
||||
DRM_DEBUG_KMS("Turn eDP power off\n");
|
||||
|
||||
edp_wait_backlight_off(intel_dp);
|
||||
|
||||
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
|
||||
|
||||
pp = ironlake_get_pp_control(intel_dp);
|
||||
|
@ -1353,6 +1368,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
|
|||
return;
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
|
||||
intel_panel_enable_backlight(intel_dp->attached_connector);
|
||||
|
||||
/*
|
||||
* If we enable the backlight right away following a panel power
|
||||
* on, we may see slight flicker as the panel syncs with the eDP
|
||||
|
@ -1367,8 +1385,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
|
|||
|
||||
I915_WRITE(pp_ctrl_reg, pp);
|
||||
POSTING_READ(pp_ctrl_reg);
|
||||
|
||||
intel_panel_enable_backlight(intel_dp->attached_connector);
|
||||
}
|
||||
|
||||
void intel_edp_backlight_off(struct intel_dp *intel_dp)
|
||||
|
@ -1381,8 +1397,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
|
|||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
|
||||
intel_panel_disable_backlight(intel_dp->attached_connector);
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
pp = ironlake_get_pp_control(intel_dp);
|
||||
pp &= ~EDP_BLC_ENABLE;
|
||||
|
@ -1392,6 +1406,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
|
|||
I915_WRITE(pp_ctrl_reg, pp);
|
||||
POSTING_READ(pp_ctrl_reg);
|
||||
intel_dp->last_backlight_off = jiffies;
|
||||
|
||||
edp_wait_backlight_off(intel_dp);
|
||||
|
||||
intel_panel_disable_backlight(intel_dp->attached_connector);
|
||||
}
|
||||
|
||||
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
|
||||
|
@ -1751,7 +1769,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc = dig_port->base.base.crtc;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
|
||||
struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
|
||||
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
|
||||
dev_priv->psr.source_ok = false;
|
||||
|
@ -1784,7 +1802,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
|
|||
return false;
|
||||
}
|
||||
|
||||
obj = to_intel_framebuffer(crtc->primary->fb)->obj;
|
||||
if (obj->tiling_mode != I915_TILING_X ||
|
||||
obj->fence_reg == I915_FENCE_REG_NONE) {
|
||||
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
|
||||
|
@ -3815,6 +3832,22 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
|
|||
intel_dp_check_link_status(intel_dp);
|
||||
}
|
||||
|
||||
bool
|
||||
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
|
||||
{
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
|
||||
if (long_hpd)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* we'll check the link status via the normal hot plug path later -
|
||||
* but for short hpds we should check it now
|
||||
*/
|
||||
intel_dp_check_link_status(intel_dp);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Return which DP Port should be selected for Transcoder DP control */
|
||||
int
|
||||
intel_trans_dp_port_sel(struct drm_crtc *crtc)
|
||||
|
@ -4387,6 +4420,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
|||
void
|
||||
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
|
@ -4443,6 +4477,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
|
|||
intel_encoder->cloneable = 0;
|
||||
intel_encoder->hot_plug = intel_dp_hot_plug;
|
||||
|
||||
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
|
||||
dev_priv->hpd_irq_port[port] = intel_dig_port;
|
||||
|
||||
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(intel_dig_port);
|
||||
|
|
|
@ -307,6 +307,9 @@ struct intel_crtc_config {
|
|||
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
|
||||
enum intel_dpll_id shared_dpll;
|
||||
|
||||
/* PORT_CLK_SEL for DDI ports. */
|
||||
uint32_t ddi_pll_sel;
|
||||
|
||||
/* Actual register state of the dpll, for shared dpll cross-checking. */
|
||||
struct intel_dpll_hw_state dpll_hw_state;
|
||||
|
||||
|
@ -338,6 +341,7 @@ struct intel_crtc_config {
|
|||
u32 pos;
|
||||
u32 size;
|
||||
bool enabled;
|
||||
bool force_thru;
|
||||
} pch_pfit;
|
||||
|
||||
/* FDI configuration, only valid if has_pch_encoder is set. */
|
||||
|
@ -398,8 +402,6 @@ struct intel_crtc {
|
|||
 	struct intel_crtc_config *new_config;
 	bool new_enabled;
 
 	uint32_t ddi_pll_sel;
 
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
@@ -485,6 +487,7 @@ struct cxsr_latency {
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
 
 struct intel_hdmi {
 	u32 hdmi_reg;
@@ -567,6 +570,7 @@ struct intel_digital_port {
 	u32 saved_port_bits;
 	struct intel_dp dp;
 	struct intel_hdmi hdmi;
+	bool (*hpd_pulse)(struct intel_digital_port *, bool);
 };
 
 static inline int
@@ -706,10 +710,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 				       enum transcoder cpu_transcoder);
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
 bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -722,7 +723,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 const char *intel_output_name(int output);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 			     struct intel_engine_cs *ring);
@@ -793,12 +793,18 @@ __intel_framebuffer_create(struct drm_device *dev,
 void intel_prepare_page_flip(struct drm_device *dev, int plane);
 void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
 /* shared dpll functions */
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll,
+			bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
 void intel_put_shared_dpll(struct intel_crtc *crtc);
 
 /* modesetting asserts */
 void assert_pll(struct drm_i915_private *dev_priv,
 		enum pipe pipe, bool state);
 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -831,7 +837,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
 void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_config *pipe_config);
 int intel_format_to_fourcc(int format);
@@ -852,6 +857,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+			bool long_hpd);
 void intel_edp_backlight_on(struct intel_dp *intel_dp);
 void intel_edp_backlight_off(struct intel_dp *intel_dp);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -863,7 +870,6 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
 void intel_edp_psr_exit(struct drm_device *dev);
 void intel_edp_psr_init(struct drm_device *dev);
 
 
 /* intel_dsi.c */
 void intel_dsi_init(struct drm_device *dev);
@@ -1005,8 +1011,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);

@@ -107,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 			       sizes->surface_depth);
 
 	size = mode_cmd.pitches[0] * mode_cmd.height;
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
 		obj = i915_gem_alloc_object(dev, size);

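The intelfb_alloc hunk above swaps an open-coded ALIGN(size, PAGE_SIZE) for the equivalent PAGE_ALIGN(size) helper. A minimal standalone sketch of the power-of-two rounding idiom both spell out (the 4096-byte page size and the demo values are illustrative, not taken from the driver):

#include <assert.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096UL	/* illustrative page size */

/* Round up to a power-of-two boundary: add (align - 1), then mask it off. */
static size_t demo_align(size_t x, size_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* PAGE_ALIGN(x) is just the page-size special case of the same idiom. */
static size_t demo_page_align(size_t x)
{
	return demo_align(x, DEMO_PAGE_SIZE);
}

int main(void)
{
	assert(demo_page_align(1) == 4096);
	assert(demo_page_align(4096) == 4096);
	assert(demo_page_align(4097) == 8192);
	return 0;
}
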
@@ -34,11 +34,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-enum disp_clk {
-	CDCLK,
-	CZCLK
-};
-
 struct gmbus_port {
 	const char *name;
 	int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 	return container_of(i2c, struct intel_gmbus, adapter);
 }
 
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
-			    enum disp_clk clk)
-{
-	u32 reg_val;
-	int clk_ratio;
-
-	reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
-	if (clk == CDCLK)
-		clk_ratio =
-			((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
-	else
-		clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
-	return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
-	int vco, gmbus_freq = 0, cdclk_div;
-
-	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
-	vco = valleyview_get_vco(dev_priv);
-
-	/* Get the CDCLK divide ratio */
-	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
-	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
-	 */
-	if (cdclk_div)
-		gmbus_freq = (vco << 1) / cdclk_div;
-
-	if (WARN_ON(gmbus_freq == 0))
-		return;
-
-	I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
 void
 intel_i2c_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/*
-	 * In BIOS-less system, program the correct gmbus frequency
-	 * before reading edid.
-	 */
-	if (IS_VALLEYVIEW(dev))
-		gmbus_set_freq(dev_priv);
-
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }

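For reference, the deleted gmbus_set_freq() path boiled down to one divide: with the CDCLK divide ratio read out of CZCLK_CDCLK_FREQ_RATIO as (field + 1), the value written was gmbus_freq = (vco << 1) / cdclk_div. A hedged user-space sketch of that arithmetic; the mask, shift, and sample register value below are placeholders, not the real VLV register layout:

#include <stdio.h>

#define DEMO_CDCLK_FREQ_SHIFT 4	/* placeholder field position */
#define DEMO_CDCLK_FREQ_MASK (0xf << DEMO_CDCLK_FREQ_SHIFT)

int main(void)
{
	unsigned int reg_val = 0x50;	/* pretend CZCLK_CDCLK_FREQ_RATIO readout */
	int vco = 1600;			/* pretend VCO value in the driver's units */

	/* Divide ratio is encoded as (field value + 1), as in get_disp_clk_div(). */
	int cdclk_div = ((reg_val & DEMO_CDCLK_FREQ_MASK) >> DEMO_CDCLK_FREQ_SHIFT) + 1;

	/* The same computation the removed gmbus_set_freq() performed. */
	int gmbus_freq = cdclk_div ? (vco << 1) / cdclk_div : 0;

	printf("cdclk_div=%d gmbus_freq=%d\n", cdclk_div, gmbus_freq);
	return 0;
}
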
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
 
 	bool is_dual_link;
 	u32 reg;
+	u32 a3_power;
 
 	struct intel_lvds_connector *attached_connector;
 };
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	enum intel_display_power_domain power_domain;
 	u32 tmp;
 
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_enabled(dev_priv, power_domain))
+		return false;
+
 	tmp = I915_READ(lvds_encoder->reg);
 
 	if (!(tmp & LVDS_PORT_EN))
@@ -165,8 +171,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 
 	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
 	 * appropriately here, but we need to look more thoroughly into how
-	 * panels behave in the two modes.
+	 * panels behave in the two modes. For now, let's just maintain the
+	 * value we got from the BIOS.
 	 */
+	temp &= ~LVDS_A3_POWER_MASK;
+	temp |= lvds_encoder->a3_power;
 
 	/* Set the dithering flag on LVDS as needed, note that there is no
 	 * special lvds dither control bit on pch-split platforms, dithering is
@@ -264,7 +273,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 				      struct intel_crtc_config *pipe_config)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder =
 		to_lvds_encoder(&intel_encoder->base);
 	struct intel_connector *intel_connector =
@@ -279,8 +287,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 		return false;
 	}
 
-	if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
-	    LVDS_A3_POWER_UP)
+	if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
 		lvds_bpp = 8*3;
 	else
 		lvds_bpp = 6*3;
@@ -1081,6 +1088,9 @@ out:
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
 		      lvds_encoder->is_dual_link ? "dual" : "single");
 
+	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+				 LVDS_A3_POWER_MASK;
+
 	/*
 	 * Unlock registers and just
 	 * leave them unlocked

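The LVDS hunks all serve one idea: read the LVDS_A3_POWER bits once at init time, cache them in the encoder, and let compute_config and pre_enable consult the cached copy instead of touching a register that may be powered down. A compact sketch of that cache-then-reuse pattern, with stand-in types and bit values:

#include <stdio.h>

#define DEMO_A3_POWER_MASK 0x3	/* stand-in for LVDS_A3_POWER_MASK */
#define DEMO_A3_POWER_UP   0x3	/* stand-in for LVDS_A3_POWER_UP */

struct demo_lvds_encoder {
	unsigned int a3_power;	/* cached at init, like lvds_encoder->a3_power */
};

/* init time: one register read while the hardware is known to be powered */
static void demo_init(struct demo_lvds_encoder *enc, unsigned int lvds_reg)
{
	enc->a3_power = lvds_reg & DEMO_A3_POWER_MASK;
}

/* compute_config time: no register access needed any more */
static int demo_lvds_bpp(const struct demo_lvds_encoder *enc)
{
	return enc->a3_power == DEMO_A3_POWER_UP ? 8 * 3 : 6 * 3;
}

int main(void)
{
	struct demo_lvds_encoder enc;

	demo_init(&enc, 0x80000003);	/* pretend LVDS register readout */
	printf("lvds_bpp=%d\n", demo_lvds_bpp(&enc));	/* prints 24 */
	return 0;
}
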
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
-		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
 	if (IS_GEN5(dev))
 		dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
-		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
 	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
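The new switch in ironlake_enable_fbc()/gen7_enable_fbc() maps the compression threshold picked by the stolen-memory allocator onto the DPFC limit bits, after bumping the threshold once for 16bpp framebuffers. A self-contained sketch of that mapping (the bit values below are placeholders, not the hardware encoding):

#include <stdio.h>

#define DEMO_LIMIT_1X 0x0	/* placeholders for DPFC_CTL_LIMIT_* */
#define DEMO_LIMIT_2X 0x1
#define DEMO_LIMIT_4X 0x2

static unsigned int demo_fbc_limit(int threshold, int cpp)
{
	unsigned int ctl = 0;

	if (cpp == 2)		/* 16bpp gets one step more compression */
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		ctl |= DEMO_LIMIT_4X;
		break;
	case 2:
		ctl |= DEMO_LIMIT_2X;
		break;
	case 1:
		ctl |= DEMO_LIMIT_1X;
		break;
	}
	return ctl;
}

int main(void)
{
	printf("32bpp, threshold 1 -> %#x\n", demo_fbc_limit(1, 4));
	printf("16bpp, threshold 2 -> %#x\n", demo_fbc_limit(2, 2));
	return 0;
}
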
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
+	obj = intel_fb_obj(fb);
 	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915.enable_fbc < 0) {
@@ -566,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
 	if (in_dbg_master())
 		goto out_disable;
 
-	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+					      drm_format_plane_cpp(fb->pixel_format, 0))) {
 		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
 			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
 		goto out_disable;
@@ -792,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
 	return NULL;
 }
 
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	u32 val;
 
-	/* deactivate cxsr */
-	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+	} else if (IS_PINEVIEW(dev)) {
+		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+		I915_WRITE(DSPFW3, val);
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+		I915_WRITE(FW_BLC_SELF, val);
+	} else if (IS_I915GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, val);
+	} else {
+		return;
+	}
+
+	DRM_DEBUG_KMS("memory self-refresh is %s\n",
+		      enable ? "enabled" : "disabled");
 }
 
 /*
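Every watermark path below follows the same shape once this helper exists: drop out of self-refresh before reprogramming watermarks, then re-enable it only if the new watermarks are valid. A sketch of that calling pattern (the demo_* names are stand-ins for the per-platform watermark computation, not driver functions):

#include <stdbool.h>

struct demo_dev_priv { bool cxsr_on; };

/* stand-in for intel_set_memory_cxsr(dev_priv, enable) */
static void demo_set_memory_cxsr(struct demo_dev_priv *p, bool enable)
{
	p->cxsr_on = enable;
}

static bool demo_compute_srwm(int *plane_sr, int *cursor_sr)
{
	*plane_sr = 12;		/* pretend watermark results */
	*cursor_sr = 6;
	return true;		/* pretend they fit the FIFO */
}

static void demo_update_wm(struct demo_dev_priv *p)
{
	int plane_sr, cursor_sr;
	bool cxsr_enabled;

	if (demo_compute_srwm(&plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		demo_set_memory_cxsr(p, false);	/* disable before touching wm */
		plane_sr = cursor_sr = 0;
	}

	/* ... write the watermark registers here ... */

	if (cxsr_enabled)
		demo_set_memory_cxsr(p, true);	/* re-enable afterwards */
}

int main(void)
{
	struct demo_dev_priv p = { false };

	demo_update_wm(&p);
	return 0;
}
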
@@ -1036,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		pineview_disable_cxsr(dev);
+		intel_set_memory_cxsr(dev_priv, false);
 		return;
 	}
 
@@ -1087,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 		I915_WRITE(DSPFW3, reg);
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
-		/* activate cxsr */
-		I915_WRITE(DSPFW3,
-			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-		DRM_DEBUG_KMS("Self-refresh is enabled\n");
+		intel_set_memory_cxsr(dev_priv, true);
 	} else {
-		pineview_disable_cxsr(dev);
-		DRM_DEBUG_KMS("Self-refresh is disabled\n");
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 }
 
@@ -1319,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 	int plane_sr, cursor_sr;
 	int ignore_plane_sr, ignore_cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	vlv_update_drain_latency(dev);
 
@@ -1345,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 			     &valleyview_wm_info,
 			     &valleyview_cursor_wm_info,
 			     &ignore_plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF_VLV,
-			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1368,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1378,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	if (g4x_compute_wm0(dev, PIPE_A,
 			    &g4x_wm_info, latency_ns,
@@ -1397,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 			     &g4x_wm_info,
 			     &g4x_cursor_wm_info,
 			     &plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF,
-			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1421,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1430,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
+	bool cxsr_enabled;
 
 	/* Calc sr entries for one plane configs */
 	crtc = single_enabled_crtc(dev);
@@ -1471,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
 			      "cursor %d\n", srwm, cursor_sr);
 
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
+		cxsr_enabled = false;
 		/* Turn off self refresh if both pipes are enabled */
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1489,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 	/* update cursor SR watermark */
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1548,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	if (IS_I915GM(dev) && enabled) {
-		struct intel_framebuffer *fb;
+		struct drm_i915_gem_object *obj;
 
-		fb = to_intel_framebuffer(enabled->primary->fb);
+		obj = intel_fb_obj(enabled->primary->fb);
 
 		/* self-refresh seems busted with untiled */
-		if (fb->obj->tiling_mode == I915_TILING_NONE)
+		if (obj->tiling_mode == I915_TILING_NONE)
 			enabled = NULL;
 	}
 
@@ -1563,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		cwm = 2;
 
 	/* Play safe and disable self-refresh before adjusting watermarks. */
-	if (IS_I945G(dev) || IS_I945GM(dev))
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+	intel_set_memory_cxsr(dev_priv, false);
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev) && enabled) {
@@ -1612,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(FW_BLC, fwater_lo);
 	I915_WRITE(FW_BLC2, fwater_hi);
 
-	if (HAS_FW_BLC(dev)) {
-		if (enabled) {
-			if (IS_I945G(dev) || IS_I945GM(dev))
-				I915_WRITE(FW_BLC_SELF,
-					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-			DRM_DEBUG_KMS("memory self refresh enabled\n");
-		} else
-			DRM_DEBUG_KMS("memory self refresh disabled\n");
-	}
+	if (enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -3150,6 +3183,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 	if (val < dev_priv->rps.max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_THRESHOLD;
 
 	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+	mask &= dev_priv->pm_rps_events;
 
 	/* IVB and SNB hard hangs on looping batchbuffer
 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
 	 */
@@ -3493,15 +3529,23 @@ static void gen8_enable_rps(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
 	/* 3: Enable RC6 */
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	intel_print_rc6_info(dev, rc6_mask);
-	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-				    GEN6_RC_CTL_EI_MODE(1) |
-				    rc6_mask);
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN7_RC_CTL_TO_MODE |
+				rc6_mask);
+	else
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN6_RC_CTL_EI_MODE(1) |
+				rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RPNSWREQ,
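The Broadwell RC6 threshold of 625 in the hunk above comes straight from the in-line comment: the target is an 800us timeout and the counter ticks in 1.28us units, so 800 / 1.28 = 625. A one-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* GEN6_RC6_THRESHOLD on BDW counts in 1.28us units, per the comment */
	double timeout_us = 800.0;
	double unit_us = 1.28;

	printf("threshold = %.0f\n", timeout_us / unit_us);	/* prints 625 */
	return 0;
}
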
@@ -4078,6 +4122,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
@@ -4098,9 +4143,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+				      VLV_RENDER_RC0_COUNT_EN |
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
 
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
@@ -5328,7 +5375,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
 	I915_WRITE(_3D_CHICKEN3,
-		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
 	I915_WRITE(COMMON_SLICE_CHICKEN2,
 		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5563,10 +5610,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
-	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
-			 dev_priv->vlv_cdclk_freq);
-
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */
@@ -5982,34 +6025,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 	return true;
 }
 
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	enum punit_power_well power_well_id = power_well->data;
 	u32 mask;
 	u32 state;
 	u32 ctrl;
-	enum pipe pipe;
-
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-		if (enable) {
-			/*
-			 * Enable the CRI clock source so we can get at the
-			 * display and the reference clock for VGA
-			 * hotplug / manual detection.
-			 */
-			I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-				   DPLL_REFA_CLK_ENABLE_VLV |
-				   DPLL_INTEGRATED_CRI_CLK_VLV);
-			udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-		} else {
-			for_each_pipe(pipe)
-				assert_pll_disabled(dev_priv, pipe);
-			/* Assert common reset */
-			I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
-				   ~DPIO_CMNRST);
-		}
-	}
 
 	mask = PUNIT_PWRGT_MASK(power_well_id);
 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -6037,28 +6059,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
 
 out:
 	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	/*
-	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
-	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
-	 *	b.	The other bits such as sfr settings / modesel may all
-	 *		be set to 0.
-	 *
-	 * This should only be done on init and resume from S3 with
-	 * both PLLs disabled, or we risk losing DPIO and PLL
-	 * synchronization.
-	 */
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
-		I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-			       struct i915_power_well *power_well, bool enable)
-{
-	enum punit_power_well power_well_id = power_well->data;
-
-	__vlv_set_power_well(dev_priv, power_well_id, enable);
 }
 
 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -6150,6 +6150,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 	vlv_set_power_well(dev_priv, power_well, false);
 }
 
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	/*
+	 * Enable the CRI clock source so we can get at the
+	 * display and the reference clock for VGA
+	 * hotplug / manual detection.
+	 */
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *	b.	The other bits such as sfr settings / modesel may all
+	 *		be set to 0.
+	 *
+	 * This should only be done on init and resume from S3 with
+	 * both PLLs disabled, or we risk losing DPIO and PLL
+	 * synchronization.
+	 */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+					    struct i915_power_well *power_well)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum pipe pipe;
+
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	for_each_pipe(pipe)
+		assert_pll_disabled(dev_priv, pipe);
+
+	/* Assert common reset */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+	vlv_set_power_well(dev_priv, power_well, false);
+}
+
 static void check_power_well_state(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {
@@ -6299,6 +6346,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
 	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_CRT) |			\
+	BIT(POWER_DOMAIN_PLLS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (				\
 	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
@@ -6398,6 +6446,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
 	.is_enabled = vlv_power_well_enabled,
 };
 
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = vlv_dpio_cmn_power_well_enable,
+	.disable = vlv_dpio_cmn_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 	.sync_hw = vlv_power_well_sync_hw,
 	.enable = vlv_power_well_enable,
@@ -6458,10 +6513,25 @@ static struct i915_power_well vlv_power_wells[] = {
 		.name = "dpio-common",
 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
 		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-		.ops = &vlv_dpio_power_well_ops,
+		.ops = &vlv_dpio_cmn_power_well_ops,
 	},
 };
 
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+						 enum punit_power_well power_well_id)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		if (power_well->data == power_well_id)
+			return power_well;
+	}
+
+	return NULL;
+}
+
 #define set_power_wells(power_domains, __power_wells) ({		\
 	(power_domains)->power_wells = (__power_wells);			\
 	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
@@ -6512,11 +6582,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *disp2d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+	/* nothing to do if common lane is already off */
+	if (!cmn->ops->is_enabled(dev_priv, cmn))
+		return;
+
+	/* If the display might be already active skip this */
+	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
+		return;
+
+	DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+	/* cmnlane needs DPLL registers */
+	disp2d->ops->enable(dev_priv, disp2d);
+
+	/*
+	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+	 * Need to assert and de-assert PHY SB reset by gating the
+	 * common lane power, then un-gating it.
+	 * Simply ungating isn't enough to reset the PHY enough to get
+	 * ports and lanes running.
+	 */
+	cmn->ops->disable(dev_priv, cmn);
+}
+
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
+	power_domains->initializing = true;
+
+	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+		mutex_lock(&power_domains->lock);
+		vlv_cmnlane_wa(dev_priv);
+		mutex_unlock(&power_domains->lock);
+	}
+
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev_priv, true);
 	intel_power_domains_resume(dev_priv);
@@ -6689,7 +6798,7 @@ void intel_init_pm(struct drm_device *dev)
 				    (dev_priv->is_ddr3 == 1) ? "3" : "2",
 				    dev_priv->fsb_freq, dev_priv->mem_freq);
 			/* Disable CxSR and never update its watermark again */
-			pineview_disable_cxsr(dev);
+			intel_set_memory_cxsr(dev_priv, false);
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;

@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
 	return space;
 }
 
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
 }
 
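ring_space() now takes the ringbuffer directly, but the underlying __ring_space() arithmetic is unchanged: free space is head minus tail (minus a reserved slop so head and tail never collide), wrapped modulo the buffer size. A sketch with an assumed reserve value; the real I915_RING_FREE_SPACE constant may differ:

#include <stdio.h>

#define DEMO_RING_FREE_SPACE 64	/* assumed reserve, see I915_RING_FREE_SPACE */

/* mirrors __ring_space(head, tail, size) */
static int demo_ring_space(int head, int tail, int size)
{
	int space = head - (tail + DEMO_RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* tail is ahead of head: wrap around */
	return space;
}

int main(void)
{
	/* head behind tail: free space wraps through the end of the buffer */
	printf("%d\n", demo_ring_space(256, 1024, 4096));	/* 3264 */
	/* head ahead of tail: straightforward difference minus the reserve */
	printf("%d\n", demo_ring_space(2048, 1024, 4096));	/* 960 */
	return 0;
}
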
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	else {
 		ringbuf->head = I915_READ_HEAD(ring);
 		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		ringbuf->last_retired_head = -1;
 	}
 
@@ -660,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
 static void render_ring_cleanup(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->semaphore_obj) {
+		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+		dev_priv->semaphore_obj = NULL;
+	}
 
 	if (ring->scratch.obj == NULL)
 		return;
@@ -673,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
 	ring->scratch.obj = NULL;
 }
 
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+					   PIPE_CONTROL_QW_WRITE |
+					   PIPE_CONTROL_FLUSH_ENABLE);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, 0);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+					   MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+					   MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
 static int gen6_signal(struct intel_engine_cs *signaller,
 		       unsigned int num_dwords)
 {
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
-	int i, ret;
+	int i, ret, num_rings;
 
 	/* NB: In order to be able to do semaphore MBOX updates for varying
 	 * number of rings, it's easiest if we round up each individual update
 	 * to a multiple of 2 (since ring updates must always be a multiple of
 	 * 2) even though the actual update only requires 3 dwords.
 	 */
-#define MBOX_UPDATE_DWORDS 4
-	if (i915_semaphore_is_enabled(dev))
-		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
-	else
-		return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
 
 	ret = intel_ring_begin(signaller, num_dwords);
 	if (ret)
 		return ret;
-#undef MBOX_UPDATE_DWORDS
 
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -703,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
 			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
-			intel_ring_emit(signaller, MI_NOOP);
 		} else {
 			intel_ring_emit(signaller, MI_NOOP);
 			intel_ring_emit(signaller, MI_NOOP);
 			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
 		}
 	}
 
+	/* If num_dwords was rounded, make sure the tail pointer is correct */
+	if (num_rings % 2 == 0)
+		intel_ring_emit(signaller, MI_NOOP);
+
 	return 0;
 }
 
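The bookkeeping in gen6_signal() and the two gen8 variants is pure arithmetic: count the other rings via hweight32() on the ring mask, multiply by the per-mailbox dword cost, and (for gen6, whose updates are 3 dwords) round the total up to the even dword count the ring requires. A standalone sketch of that budget computation:

#include <stdio.h>

/* population count of the ring mask = number of rings present */
static int demo_hweight32(unsigned int x)
{
	int n = 0;

	for (; x; x &= x - 1)
		n++;
	return n;
}

static int demo_round_up(int x, int to)
{
	return (x + to - 1) / to * to;
}

/* gen6: 3 dwords per mailbox update, padded to a multiple of 2 */
static int demo_gen6_signal_dwords(unsigned int ring_mask, int base_dwords)
{
	int num_rings = demo_hweight32(ring_mask);

	return base_dwords + demo_round_up((num_rings - 1) * 3, 2);
}

int main(void)
{
	/* e.g. 4 rings present, plus the 4 dwords of the request itself */
	printf("%d\n", demo_gen6_signal_dwords(0xf, 4));	/* 4 + 10 = 14 */
	return 0;
}
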
@@ -729,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
 {
 	int ret;
 
-	ret = ring->semaphore.signal(ring, 4);
+	if (ring->semaphore.signal)
+		ret = ring->semaphore.signal(ring, 4);
+	else
+		ret = intel_ring_begin(ring, 4);
+
 	if (ret)
 		return ret;
 
@@ -756,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
  * @signaller - ring which has, or will signal
  * @seqno - seqno which the waiter will block on
  */
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+	       struct intel_engine_cs *signaller,
+	       u32 seqno)
+{
+	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	int ret;
+
+	ret = intel_ring_begin(waiter, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+				MI_SEMAPHORE_GLOBAL_GTT |
+				MI_SEMAPHORE_POLL |
+				MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter,
+			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_emit(waiter,
+			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_advance(waiter);
+	return 0;
+}
+
 static int
 gen6_ring_sync(struct intel_engine_cs *waiter,
 	       struct intel_engine_cs *signaller,
@@ -1331,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 
 	if ((obj = ring->status_page.obj) == NULL) {
+		unsigned flags;
 		int ret;
 
 		obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1343,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
 		if (ret)
 			goto err_unref;
 
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+		flags = 0;
+		if (!HAS_LLC(ring->dev))
+			/* On g33, we cannot place HWS above 256MiB, so
+			 * restrict its pinning to the low mappable arena.
+			 * Though this restriction is not documented for
+			 * gen4, gen5, or byt, they also behave similarly
+			 * and hang if the HWS is placed at the top of the
+			 * GTT. To generalise, it appears that all !llc
+			 * platforms have issues with us placing the HWS
+			 * above the mappable region (even though we never
+			 * actualy map it).
+			 */
+			flags |= PIN_MAPPABLE;
+		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
 		if (ret) {
 err_unref:
 			drm_gem_object_unreference(&obj->base);
@@ -1380,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
+	if (!ringbuf->obj)
+		return;
+
+	iounmap(ringbuf->virtual_start);
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
+{
-	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (intel_ring_initialized(ring))
+	if (ringbuf->obj)
 		return 0;
 
 	obj = NULL;
@@ -1460,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	ret = allocate_ring_buffer(ring);
+	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
 		goto error;
@@ -1501,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ringbuf->virtual_start);
-
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	drm_gem_object_unreference(&ringbuf->obj->base);
-	ringbuf->obj = NULL;
+	intel_destroy_ringbuffer_obj(ringbuf);
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1531,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		ringbuf->head = ringbuf->last_retired_head;
 		ringbuf->last_retired_head = -1;
 
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		if (ringbuf->space >= n)
 			return 0;
 	}
@@ -1554,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
-	ringbuf->space = ring_space(ring);
+	ringbuf->space = ring_space(ringbuf);
 	return 0;
 }
 
@@ -1583,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	trace_i915_ring_wait_begin(ring);
 	do {
 		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
@@ -1635,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 		iowrite32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
-	ringbuf->space = ring_space(ring);
+	ringbuf->space = ring_space(ringbuf);
 
 	return 0;
 }
@@ -1947,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_object *obj;
+	int ret;
 
 	ring->name = "render ring";
 	ring->id = RCS;
 	ring->mmio_base = RENDER_RING_BASE;
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (INTEL_INFO(dev)->gen >= 8) {
+		if (i915_semaphore_is_enabled(dev)) {
+			obj = i915_gem_alloc_object(dev, 4096);
+			if (obj == NULL) {
+				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+				i915.semaphores = 0;
+			} else {
+				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+				if (ret != 0) {
+					drm_gem_object_unreference(&obj->base);
+					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+					i915.semaphores = 0;
+				} else
+					dev_priv->semaphore_obj = obj;
+			}
+		}
+		ring->add_request = gen6_add_request;
+		ring->flush = gen8_render_ring_flush;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (i915_semaphore_is_enabled(dev)) {
+			WARN_ON(!dev_priv->semaphore_obj);
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_rcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		if (INTEL_INFO(dev)->gen >= 8) {
-			ring->flush = gen8_render_ring_flush;
-			ring->irq_get = gen8_ring_get_irq;
-			ring->irq_put = gen8_ring_put_irq;
-		} else {
-			ring->irq_get = gen6_ring_get_irq;
-			ring->irq_put = gen6_ring_put_irq;
-		}
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->semaphore.sync_to = gen6_ring_sync;
-		ring->semaphore.signal = gen6_signal;
-		/*
-		 * The current semaphore is only applied on pre-gen8 platform.
-		 * And there is no VCS2 ring on the pre-gen8 platform. So the
-		 * semaphore between RCS and VCS2 is initialized as INVALID.
-		 * Gen8 will initialize the sema between VCS2 and RCS later.
-		 */
-		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform. And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between RCS and VCS2 is
+			 * initialized as INVALID. Gen8 will initialize the
+			 * sema between VCS2 and RCS later.
			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
@@ -2013,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
+
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 	else if (IS_GEN8(dev))
@@ -2030,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
-		struct drm_i915_gem_object *obj;
-		int ret;
-
 		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
@@ -2163,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->irq_put = gen8_ring_put_irq;
 			ring->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen8_ring_sync;
+				ring->semaphore.signal = gen8_xcs_signal;
+				GEN8_RING_SEMAPHORE_INIT;
+			}
 		} else {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen6_ring_get_irq;
 			ring->irq_put = gen6_ring_put_irq;
 			ring->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen6_ring_sync;
+				ring->semaphore.signal = gen6_signal;
+				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			}
 		}
-		ring->semaphore.sync_to = gen6_ring_sync;
-		ring->semaphore.signal = gen6_signal;
-		/*
-		 * The current semaphore is only applied on pre-gen8 platform.
-		 * And there is no VCS2 ring on the pre-gen8 platform. So the
-		 * semaphore between VCS and VCS2 is initialized as INVALID.
-		 * Gen8 will initialize the sema between VCS2 and VCS later.
-		 */
-		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	} else {
 		ring->mmio_base = BSD_RING_BASE;
 		ring->flush = bsd_ring_flush;
@@ -2224,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 		return -EINVAL;
 	}
 
-	ring->name = "bds2_ring";
+	ring->name = "bsd2 ring";
 	ring->id = VCS2;
 
 	ring->write_tail = ring_write_tail;
@@ -2239,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	ring->irq_put = gen8_ring_put_irq;
 	ring->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	/*
-	 * The current semaphore is only applied on the pre-gen8. And there
-	 * is no bsd2 ring on the pre-gen8. So now the semaphore_register
-	 * between VCS2 and other ring is initialized as invalid.
-	 * Gen8 will initialize the sema between VCS2 and other ring later.
-	 */
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
+	if (i915_semaphore_is_enabled(dev)) {
+		ring->semaphore.sync_to = gen8_ring_sync;
+		ring->semaphore.signal = gen8_xcs_signal;
+		GEN8_RING_SEMAPHORE_INIT;
+	}
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2283,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
 	} else {
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.sync_to = gen6_ring_sync;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform. And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between BCS and VCS2 is
+			 * initialized as INVALID. Gen8 will initialize the
+			 * sema between BCS and VCS2 later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	}
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	/*
-	 * The current semaphore is only applied on pre-gen8 platform. And
-	 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
-	 * between BCS and VCS2 is initialized as INVALID.
-	 * Gen8 will initialize the sema between BCS and VCS2 later.
-	 */
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2333,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
 	} else {
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		ring->irq_get = hsw_vebox_get_irq;
 		ring->irq_put = hsw_vebox_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	}
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);

@ -40,6 +40,32 @@ struct intel_hw_status_page {
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
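
With I915_NUM_RINGS == 5 and an 8-byte slot per seqno, each ring owns a 40-byte row in the semaphore page, so the whole table spans 5 * 5 * 8 = 200 bytes. Dropping the common GGTT base, the pure index math of the two macros can be checked standalone; the hex values match the GEN8 signal/wait table documented further down in this header. A sketch, not driver code:

#include <assert.h>
#include <stdint.h>

enum { RCS, VCS, BCS, VECS, VCS2, I915_NUM_RINGS };
#define SEQNO_SIZE sizeof(uint64_t)	/* i915_semaphore_seqno_size */

/* Index core of GEN8_SIGNAL_OFFSET, minus the GGTT base. */
static uint64_t signal_off(int ring, int to)
{
	return ring * I915_NUM_RINGS * SEQNO_SIZE + SEQNO_SIZE * to;
}

/* Index core of GEN8_WAIT_OFFSET, minus the GGTT base. */
static uint64_t wait_off(int ring, int from)
{
	return from * I915_NUM_RINGS * SEQNO_SIZE + SEQNO_SIZE * ring;
}

int main(void)
{
	assert(signal_off(RCS, VCS) == 0x08);	/* RCS signals VCS */
	assert(signal_off(VCS, RCS) == 0x28);	/* VCS signals RCS */
	assert(wait_off(RCS, VCS) == 0x28);	/* ...which is where RCS waits on VCS */

	/* The wait table is exactly the transpose of the signal table. */
	for (int x = 0; x < I915_NUM_RINGS; x++)
		for (int y = 0; y < I915_NUM_RINGS; y++)
			assert(wait_off(y, x) == signal_off(x, y));
	return 0;
}
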
#define GEN8_RING_SEMAPHORE_INIT do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
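
GEN8_RING_SEMAPHORE_INIT leans on a C idiom worth noting: because the body is wrapped in do { ... } while (0), the early break skips the rest of the macro without returning from, or disturbing any loop in, the calling function. A minimal sketch of the same bail-out pattern (INIT_IF_READY is an illustrative name):

#include <stdio.h>

#define INIT_IF_READY(ptr) do { \
	if (!(ptr)) \
		break; /* leaves only the macro body, not the caller's loop */ \
	printf("initialising with %p\n", (void *)(ptr)); \
} while (0)

int main(void)
{
	int x = 0;

	for (int i = 0; i < 2; i++) {
		INIT_IF_READY((int *)0);	/* bails before the printf */
		INIT_IF_READY(&x);		/* runs the body */
	}
	return 0;
}
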
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@ -127,15 +153,55 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);

/* GEN8 signal/wait table - never trust comments!
* signal to signal to signal to signal to signal to
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
* |-------------------------------------------------------------------
* VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
* |-------------------------------------------------------------------
* BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
* |-------------------------------------------------------------------
* VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
* ie. transpose of g(x, y)
*
* sync from sync from sync from sync from sync from
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
* |-------------------------------------------------------------------
* VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
* |-------------------------------------------------------------------
* BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
* |-------------------------------------------------------------------
* VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
* ie. transpose of f(x, y)
*/
struct {
u32 sync_seqno[I915_NUM_RINGS-1];

struct {
/* our mbox written by others */
u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */
u32 signal[I915_NUM_RINGS];
} mbox;
union {
struct {
/* our mbox written by others */
u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */
u32 signal[I915_NUM_RINGS];
} mbox;
u64 signal_ggtt[I915_NUM_RINGS];
};

/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
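
The union added above overlays the two schemes in the same storage: pre-gen8 code sees the wait/signal mailbox registers, gen8 sees the 64-bit GGTT signal addresses, and with five rings the two arms are both 40 bytes. A standalone model (not the driver struct) making that concrete:

#include <stdint.h>

#define I915_NUM_RINGS 5

struct semaphore_slots {
	union {
		struct {
			uint32_t wait[I915_NUM_RINGS];	 /* our mbox written by others */
			uint32_t signal[I915_NUM_RINGS]; /* mboxes this ring signals to */
		} mbox;
		uint64_t signal_ggtt[I915_NUM_RINGS];
	};
};

/* 2 * 5 * sizeof(u32) == 5 * sizeof(u64) == 40: the arms overlay exactly. */
_Static_assert(sizeof(struct semaphore_slots) == 40, "union arms must coincide");

int main(void)
{
	return 0;
}
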
@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
int idx;

/*
* cs -> 0 = vcs, 1 = bcs
* vcs -> 0 = bcs, 1 = cs,
* bcs -> 0 = cs, 1 = vcs.
* rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
* vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
* bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
* vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/

idx = (other - ring) - 1;
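
The new comment can be checked against the arithmetic: the engines live in a contiguous array, so (other - ring) is the id distance, and the - 1 plus wraparound skips the ring itself. A standalone check with plain integer ids (sync_index is an illustrative name):

#include <assert.h>

enum { RCS, VCS, BCS, VECS, VCS2, I915_NUM_RINGS };

static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;

	if (idx < 0)
		idx += I915_NUM_RINGS;
	return idx;
}

int main(void)
{
	assert(sync_index(RCS, VCS) == 0);	/* rcs  -> 0 = vcs  */
	assert(sync_index(RCS, VCS2) == 3);	/* rcs  -> 3 = vcs2 */
	assert(sync_index(BCS, VCS) == 3);	/* bcs  -> 3 = vcs  */
	assert(sync_index(VECS, VCS2) == 0);	/* vecs -> 0 = vcs2 */
	assert(sync_index(VCS2, RCS) == 0);	/* vcs2 -> 0 = rcs  */
	return 0;
}
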
@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ring->buffer->tail;
return ringbuf->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
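
The accessor now takes the intel_ringbuffer rather than the engine, in line with the ongoing split between engine and ringbuffer state; callers that used to pass the engine now pass its buffer. A sketch of a migrated call site:

/* old: u32 tail = intel_ring_get_tail(ring); */
u32 tail = intel_ring_get_tail(ring->buffer);
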
@ -1010,7 +1010,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;

if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;

ret = mutex_lock_interruptible(&dev->struct_mutex);
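
From userspace, the gate above means querying the default context's reset stats needs CAP_SYS_ADMIN, while per-context queries do not. A hedged sketch of the query (the i915_drm.h include path varies by system, e.g. libdrm's; error handling kept minimal):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>	/* from libdrm; adjust the include path as needed */

int main(void)
{
	struct drm_i915_reset_stats stats;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = 0;	/* default context: the privileged case above */
	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
		printf("resets=%u active=%u pending=%u\n",
		       stats.reset_count, stats.batch_active, stats.batch_pending);
	return 0;
}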