Merge tag 'drm-intel-next-2015-09-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- initialize backlight from VBT as fallback (Jani)
- hpd A support from Ville
- various atomic polish all over (mostly from Maarten)
- first parts of virtualize gpu guest support on bdw from Zhiyuan Lv
- GuC fixes from Alex
- polish for the chv clocks code (Ville)
- various things all over, as usual

* tag 'drm-intel-next-2015-09-11' of git://anongit.freedesktop.org/drm-intel: (145 commits)
  drm/i915: Update DRIVER_DATE to 20150911
  drm/i915: Remove one very outdated comment
  drm/i915: Use crtc->state for duplication.
  drm/i915: Do not handle a null plane state.
  drm/i915: Remove legacy plane updates for cursor and sprite planes.
  drm/i915: Use atomic state when changing cursor visibility.
  drm/i915: Use the atomic state in intel_update_primary_planes.
  drm/i915: Use the plane state in intel_crtc_info.
  drm/i915: Use atomic plane state in the primary plane update.
  drm/i915: add attached connector to hdmi container
  drm/i915: don't hard code vlv backlight frequency if unset
  drm/i915: initialize backlight max from VBT
  drm/i915: use pch backlight override on hsw too
  drm/i915/bxt: Clean up bxt_init_clock_gating
  drm/i915: Fix cmdparser STORE/LOAD command descriptors
  drm/i915: Dump pfit state as hex
  drm/i915: access the PP_ON_DELAYS/PP_OFF_DELAYS regs only pre GEN5
  drm/i915: access the PP_CONTROL reg only pre GEN5
  drm/i915: Refactor common ringbuffer allocation code
  drm/i915: use the yesno helper for logging
  ...
This commit is contained in:
commit d4070ff713
@@ -4237,6 +4237,20 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/i915_gem_shrinker.c
       </sect2>
     </sect1>
+    <sect1>
+      <title>GuC-based Command Submission</title>
+      <sect2>
+        <title>GuC</title>
+!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
+!Idrivers/gpu/drm/i915/intel_guc_loader.c
+      </sect2>
+      <sect2>
+        <title>GuC Client</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
+!Idrivers/gpu/drm/i915/i915_guc_submission.c
+      </sect2>
+    </sect1>
+
     <sect1>
       <title> Tracing </title>
       <para>
@@ -40,6 +40,10 @@ i915-y += i915_cmd_parser.o \
	  intel_ringbuffer.o \
	  intel_uncore.o

+# general-purpose microcontroller (GuC) support
+i915-y += intel_guc_loader.o \
+	  i915_guc_submission.o
+
 # autogenerated null render state
 i915-y += intel_renderstate_gen6.o \
	  intel_renderstate_gen7.o \
@@ -94,7 +94,7 @@
 #define CMD(op, opm, f, lm, fl, ...)				\
	{							\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
-		.cmd = { (op), (opm) },				\
+		.cmd = { (op), (opm) },				\
		.length = { (lm) },				\
		__VA_ARGS__					\
	}
@@ -124,14 +124,14 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
-	CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W | B,
+	CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
-	CMD(  MI_LOAD_REGISTER_MEM(1),          SMI,   !F,  0xFF,   W | B,
+	CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
		 * only MI_LOAD_REGISTER_IMM commands.
		 */
		if (reg_addr == OACONTROL) {
-			if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
+			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
				return false;
			}
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
		 * allowed mask/value pair given in the whitelist entry.
		 */
		if (reg->mask) {
-			if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
+			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
				DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
						 reg_addr);
				return false;
@@ -1213,6 +1213,7 @@ int i915_cmd_parser_get_version(void)
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
+	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
	 */
-	return 3;
+	return 4;
 }

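The version bump is observable from userspace through the GETPARAM interface. A minimal sketch of such a query (an illustration added here, not part of the patch; it assumes libdrm, the i915 UAPI headers, and /dev/dri/card0 as the device node):

#include <fcntl.h>
#include <stdio.h>
#include <i915_drm.h>
#include <xf86drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 1;
	printf("cmd parser version: %d\n", version); /* 4 after this series */
	return 0;
}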
@@ -46,11 +46,6 @@ enum {
	PINNED_LIST,
 };

-static const char *yesno(int v)
-{
-	return v ? "yes" : "no";
-}
-
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
 static int
@@ -1387,17 +1382,16 @@ static int ironlake_drpc_info(struct seq_file *m)
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

-	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
-		   "yes" : "no");
+	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
-		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
-		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
-		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
@@ -1406,7 +1400,7 @@ static int ironlake_drpc_info(struct seq_file *m)
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
-		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
@@ -1995,7 +1989,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
		return;
	}

-	page = i915_gem_object_get_page(ctx_obj, 1);
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	if (!WARN_ON(page == NULL)) {
		reg_state = kmap_atomic(page);

@@ -2250,7 +2244,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
-	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
@@ -2273,13 +2266,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
		ppgtt->debug_dump(ppgtt, m);
	}

-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-
-		seq_printf(m, "proc: %s\n",
-			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
-		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
-	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 }

@@ -2288,6 +2274,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_file *file;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
@@ -2299,6 +2286,15 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+
+		seq_printf(m, "\nproc: %s\n",
+			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
+		idr_for_each(&file_priv->context_idr, per_file_ctx,
+			     (void *)(unsigned long)m);
+	}
+
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

@@ -2372,6 +2368,147 @@ static int i915_llc(struct seq_file *m, void *data)
	return 0;
 }

+static int i915_guc_load_status_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+	u32 tmp, i;
+
+	if (!HAS_GUC_UCODE(dev_priv->dev))
+		return 0;
+
+	seq_printf(m, "GuC firmware status:\n");
+	seq_printf(m, "\tpath: %s\n",
+		guc_fw->guc_fw_path);
+	seq_printf(m, "\tfetch: %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+	seq_printf(m, "\tload: %s\n",
+		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+	seq_printf(m, "\tversion wanted: %d.%d\n",
+		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+	seq_printf(m, "\tversion found: %d.%d\n",
+		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+
+	tmp = I915_READ(GUC_STATUS);
+
+	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+	seq_printf(m, "\tBootrom status = 0x%x\n",
+		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+	seq_printf(m, "\tuKernel status = 0x%x\n",
+		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+	seq_printf(m, "\tMIA Core status = 0x%x\n",
+		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+	seq_puts(m, "\nScratch registers:\n");
+	for (i = 0; i < 16; i++)
+		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+
+	return 0;
+}
+
+static void i915_guc_client_info(struct seq_file *m,
+				 struct drm_i915_private *dev_priv,
+				 struct i915_guc_client *client)
+{
+	struct intel_engine_cs *ring;
+	uint64_t tot = 0;
+	uint32_t i;
+
+	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
+		client->priority, client->ctx_index, client->proc_desc_offset);
+	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+		client->doorbell_id, client->doorbell_offset, client->cookie);
+	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
+		client->wq_size, client->wq_offset, client->wq_tail);
+
+	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
+	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
+	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
+
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "\tSubmissions: %llu %s\n",
+				client->submissions[i],
+				ring->name);
+		tot += client->submissions[i];
+	}
+	seq_printf(m, "\tTotal: %llu\n", tot);
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_guc guc;
+	struct i915_guc_client client = {};
+	struct intel_engine_cs *ring;
+	enum intel_ring_id i;
+	u64 total = 0;
+
+	if (!HAS_GUC_SCHED(dev_priv->dev))
+		return 0;
+
+	/* Take a local copy of the GuC data, so we can dump it at leisure */
+	spin_lock(&dev_priv->guc.host2guc_lock);
+	guc = dev_priv->guc;
+	if (guc.execbuf_client) {
+		spin_lock(&guc.execbuf_client->wq_lock);
+		client = *guc.execbuf_client;
+		spin_unlock(&guc.execbuf_client->wq_lock);
+	}
+	spin_unlock(&dev_priv->guc.host2guc_lock);
+
+	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
+	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
+	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
+	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
+	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+
+	seq_printf(m, "\nGuC submissions:\n");
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
+			ring->name, guc.submissions[i],
+			guc.last_seqno[i], guc.last_seqno[i]);
+		total += guc.submissions[i];
+	}
+	seq_printf(m, "\t%s: %llu\n", "Total", total);
+
+	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
+	i915_guc_client_info(m, dev_priv, &client);
+
+	/* Add more as required ... */
+
+	return 0;
+}
+
+static int i915_guc_log_dump(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
+	u32 *log;
+	int i = 0, pg;
+
+	if (!log_obj)
+		return 0;
+
+	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
+		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+
+		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
+			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   *(log + i), *(log + i + 1),
+				   *(log + i + 2), *(log + i + 3));
+
+		kunmap_atomic(log);
+	}
+
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
	struct drm_info_node *node = m->private;
@@ -2680,11 +2817,13 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
+	struct drm_plane_state *plane_state = crtc->primary->state;
+	struct drm_framebuffer *fb = plane_state->fb;

-	if (crtc->primary->fb)
+	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-			   crtc->primary->fb->base.id, crtc->x, crtc->y,
-			   crtc->primary->fb->width, crtc->primary->fb->height);
+			   fb->base.id, plane_state->src_x >> 16,
+			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -2706,8 +2845,7 @@ static void intel_dp_info(struct seq_file *m,
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
-	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
-		   "no");
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_encoder->type == INTEL_OUTPUT_EDP)
		intel_panel_info(m, &intel_connector->panel);
 }
@@ -2718,8 +2856,7 @@ static void intel_hdmi_info(struct seq_file *m,
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

-	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
-		   "no");
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
 }

 static void intel_lvds_info(struct seq_file *m,
@@ -4807,7 +4944,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	const int ss_max = 2;
+	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

@@ -5033,6 +5170,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
+	{"i915_guc_info", i915_guc_info, 0},
+	{"i915_guc_load_status", i915_guc_load_status_info, 0},
+	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},

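The three new entries surface under the DRM debugfs directory alongside the existing i915 files. A small C sketch that dumps one of them (added here for illustration; the path assumes debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_guc_load_status", "r");
	char buf[256];

	if (!f)
		return 1;
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}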
@@ -364,12 +364,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
-		i915_resume_legacy(dev);
+		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		i915_suspend_legacy(dev, pmm);
+		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
 }
@@ -435,6 +435,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

+	/* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
+	mutex_lock(&dev->struct_mutex);
+	intel_guc_ucode_init(dev);
+	mutex_unlock(&dev->struct_mutex);
+
	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;
@@ -476,6 +481,9 @@ cleanup_gem:
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
+	mutex_lock(&dev->struct_mutex);
+	intel_guc_ucode_fini(dev);
+	mutex_unlock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
@@ -791,6 +799,24 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
		      info->has_eu_pg ? "y" : "n");
 }

+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	/*
+	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+	 * CHV x1 PHY (DP/HDMI D)
+	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+	 */
+	if (IS_CHERRYVIEW(dev_priv)) {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+	} else {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+	}
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -971,8 +997,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

-	intel_setup_bios(dev);
-
	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -991,6 +1015,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	intel_device_info_runtime_init(dev);

+	intel_init_dpio(dev_priv);
+
	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
@@ -1128,6 +1154,7 @@ int i915_driver_unload(struct drm_device *dev)
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev->struct_mutex);
+	intel_guc_ucode_fini(dev);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

@@ -362,6 +362,7 @@ static const struct intel_device_info intel_skylake_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
+	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
@@ -374,6 +375,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
+	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
@@ -386,6 +388,7 @@ static const struct intel_device_info intel_broxton_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
+	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
@@ -679,7 +682,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
	return 0;
 }

-int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
	int error;

@@ -812,7 +815,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
	return ret;
 }

-int i915_resume_legacy(struct drm_device *dev)
+int i915_resume_switcheroo(struct drm_device *dev)
 {
	int ret;

@@ -1552,6 +1555,15 @@ static int intel_runtime_resume(struct device *device)
		gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	/*
+	 * On VLV/CHV display interrupts are part of the display
+	 * power well, so hpd is reinitialized from there. For
+	 * everyone else do it here.
+	 */
+	if (!IS_VALLEYVIEW(dev_priv))
+		intel_hpd_init(dev_priv);
+
	intel_enable_gt_powersave(dev);

	if (ret)
@@ -1649,7 +1661,7 @@ static struct drm_driver driver = {
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
-	    DRIVER_RENDER,
+	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
@@ -1658,10 +1670,6 @@ static struct drm_driver driver = {
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

-	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-	.suspend = i915_suspend_legacy,
-	.resume = i915_resume_legacy,
-
 #if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1704,7 +1712,6 @@ static int __init i915_init(void)
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */
-	driver.driver_features |= DRIVER_MODESET;

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;
@@ -1715,18 +1722,12 @@ static int __init i915_init(void)
 #endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
-		driver.get_vblank_timestamp = NULL;
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

-	/*
-	 * FIXME: Note that we're lying to the DRM core here so that we can get access
-	 * to the atomic ioctl and the atomic properties. Only plane operations on
-	 * a single CRTC will actually work.
-	 */
-	if (driver.driver_features & DRIVER_MODESET)
+	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	return drm_pci_init(&driver, &i915_pci_driver);

@@ -50,13 +50,14 @@
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
+#include "intel_guc.h"

 /* General customization:
  */

 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150731"
+#define DRIVER_DATE		"20150911"

 #undef WARN_ON
 /* Many gcc seem to not see through this and fall over :( */
@@ -67,11 +68,11 @@
	BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
 #endif

 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )

 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__);
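A note on the new WARN_ON()/WARN_ON_ONCE() form: passing the stringified condition as a "%s" argument, rather than pasting it into the format string, means the condition text can never be misread as conversion specifications. The diff itself does not state the rationale, so treat the following as an illustration rather than the stated motivation:

/*
 * With the old, string-pasting definition:
 *
 *	WARN_ON(len % 4)  ->  WARN((len % 4), "WARN_ON(len % 4)")
 *
 * the "% 4" lands inside the format string, where printf-style parsing
 * may treat it as a conversion specification.  With the new definition:
 *
 *	WARN_ON(len % 4)  ->  WARN((len % 4), "WARN_ON(%s)", "len % 4")
 *
 * the condition text is passed as ordinary string data instead.
 */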
@@ -105,6 +106,11 @@
	unlikely(__ret_warn_on);					\
 })

+static inline const char *yesno(bool v)
+{
+	return v ? "yes" : "no";
+}
+
 enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
@@ -549,7 +555,7 @@ struct drm_i915_error_state {

	struct drm_i915_error_object {
		int page_count;
-		u32 gtt_offset;
+		u64 gtt_offset;
		u32 *pages[0];
	} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

@@ -575,7 +581,7 @@ struct drm_i915_error_state {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_RINGS], wseqno;
-		u32 gtt_offset;
+		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
@@ -665,6 +671,8 @@ struct drm_i915_display_funcs {
			  uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
+	uint32_t (*backlight_hz_to_pwm)(struct intel_connector *connector,
+					uint32_t hz);
 };

 enum forcewake_domain_id {
@@ -1693,7 +1701,7 @@ struct i915_execbuffer_params {
	struct drm_file                 *file;
	uint32_t                        dispatch_flags;
	uint32_t                        args_batch_start_offset;
-	uint32_t                        batch_obj_vm_offset;
+	uint64_t                        batch_obj_vm_offset;
	struct intel_engine_cs          *ring;
	struct drm_i915_gem_object      *batch_obj;
	struct intel_context            *ctx;
@@ -1716,6 +1724,8 @@ struct drm_i915_private {

	struct i915_virtual_gpu vgpu;

+	struct intel_guc guc;
+
	struct intel_csr csr;

	/* Display CSR-related protection */
@@ -1796,6 +1806,7 @@ struct drm_i915_private {
	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq, max_cdclk_freq;
+	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;

	/**
@@ -1963,6 +1974,11 @@ static inline struct drm_i915_private *dev_to_i915(struct device *dev)
	return to_i915(dev_get_drvdata(dev));
 }

+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+	return container_of(guc, struct drm_i915_private, guc);
+}
+
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -2520,7 +2536,8 @@ struct drm_i915_cmd_table {
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
 #define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
 #define USES_PPGTT(dev)		(i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt == 2)
+#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)

 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
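Reading the three PPGTT macros together with the i915.enable_ppgtt module parameter gives the following mode table (the value names are descriptive glosses inferred from the macros above, not definitions from this diff):

/*
 *	i915.enable_ppgtt == 0 : PPGTT disabled       (USES_PPGTT false)
 *	i915.enable_ppgtt == 1 : aliasing PPGTT       (USES_PPGTT only)
 *	i915.enable_ppgtt == 2 : full PPGTT           (USES_FULL_PPGTT true)
 *	i915.enable_ppgtt == 3 : full 48-bit PPGTT    (all three true)
 *
 * Relaxing USES_FULL_PPGTT from '== 2' to '>= 2' is what lets the new
 * 48-bit mode still count as "full" PPGTT.
 */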
@@ -2566,6 +2583,9 @@

 #define HAS_CSR(dev)	(IS_SKYLAKE(dev))

+#define HAS_GUC_UCODE(dev)	(IS_GEN9(dev))
+#define HAS_GUC_SCHED(dev)	(IS_GEN9(dev))
+
 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
				    INTEL_INFO(dev)->gen >= 8)

@@ -2584,6 +2604,7 @@
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2603,8 +2624,8 @@
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;

-extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
-extern int i915_resume_legacy(struct drm_device *dev);
+extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_switcheroo(struct drm_device *dev);

 /* i915_params.c */
 struct i915_params {
@@ -2637,6 +2658,7 @@ struct i915_params {
	int use_mmio_flip;
	int mmio_debug;
	bool verbose_state_checks;
+	bool nuclear_pageflip;
	int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2986,13 +3008,11 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *gem_obj, int flags);

-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-			      const struct i915_ggtt_view *view);
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
-		    struct i915_address_space *vm);
-static inline unsigned long
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view);
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm);
+static inline u64
 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);

@@ -1005,12 +1005,14 @@ out:
	if (!needs_clflush_after &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		if (i915_gem_clflush_object(obj, obj->pin_display))
-			i915_gem_chipset_flush(dev);
+			needs_clflush_after = true;
		}
	}

+	if (needs_clflush_after)
+		i915_gem_chipset_flush(dev);
+	else
+		obj->cache_dirty = true;
+
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
@@ -3228,10 +3230,6 @@ int i915_vma_unbind(struct i915_vma *vma)
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;
-	/* Continue on if we fail due to EIO, the GPU is hung so we
-	 * should be safe and we need to cleanup or else we might
-	 * cause memory corruption through use-after-free.
-	 */

	if (i915_is_ggtt(vma->vm) &&
	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
@@ -3355,7 +3353,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 {
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	u32 fence_alignment, unfenced_alignment;
+	u64 size, fence_size;
	u64 start =
		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	u64 end =
@@ -3414,7 +3413,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
	 * attempt to find space.
	 */
	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
			  ggtt_view ? ggtt_view->type : 0,
			  size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3638,10 +3637,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
-	int ret;
+	int ret = 0;

	if (obj->cache_level == cache_level)
-		return 0;
+		goto out;

	if (i915_gem_obj_is_pinned(obj)) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
@@ -3686,6 +3685,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

+out:
	if (obj->cache_dirty &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
@@ -3738,6 +3738,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
+		/*
+		 * Due to a HW issue on BXT A stepping, GPU stores via a
+		 * snooped mapping may leave stale data in a corresponding CPU
+		 * cacheline, whereas normally such cachelines would get
+		 * invalidated.
+		 */
+		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+			return -ENODEV;
+
		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
@@ -4011,15 +4020,13 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
		return -EBUSY;

	if (i915_vma_misplaced(vma, alignment, flags)) {
-		unsigned long offset;
-		offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
-			 i915_gem_obj_offset(obj, vm);
		WARN(vma->pin_count,
		     "bo is already pinned in %s with incorrect alignment:"
-		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
+		     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
		     " obj->map_and_fenceable=%d\n",
		     ggtt_view ? "ggtt" : "ppgtt",
-		     offset,
+		     upper_32_bits(vma->node.start),
+		     lower_32_bits(vma->node.start),
		     alignment,
		     !!(flags & PIN_MAPPABLE),
		     obj->map_and_fenceable);
@@ -4679,6 +4686,22 @@ i915_gem_init_hw(struct drm_device *dev)
			goto out;
	}

+	/* We can't enable contexts until all firmware is loaded */
+	ret = intel_guc_ucode_load(dev);
+	if (ret) {
+		/*
+		 * If we got an error and GuC submission is enabled, map
+		 * the error to -EIO so the GPU will be declared wedged.
+		 * OTOH, if we didn't intend to use the GuC anyway, just
+		 * discard the error and carry on.
+		 */
+		DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
+			  i915.enable_guc_submission ? "" : " (ignored)");
+		ret = i915.enable_guc_submission ? -EIO : 0;
+		if (ret)
+			goto out;
+	}
+
	/* Now it is safe to go back round and do everything else: */
	for_each_ring(ring, dev_priv, i) {
		struct drm_i915_gem_request *req;
@@ -4974,9 +4997,8 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 }

 /* All the new VM stuff */
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
-		    struct i915_address_space *vm)
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+			struct i915_address_space *vm)
 {
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;
@@ -4996,9 +5018,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
	return -1;
 }

-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-			      const struct i915_ggtt_view *view)
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+				  const struct i915_ggtt_view *view)
 {
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;

@@ -332,6 +332,13 @@ int i915_gem_context_init(struct drm_device *dev)
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

+	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+		if (!i915.enable_execlists) {
+			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
+			return -EINVAL;
+		}
+	}
+
	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */

@@ -128,7 +128,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
	WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
	     (size & -size) != size ||
	     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-	     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+	     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
	     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);

	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
@@ -171,7 +171,7 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
	WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
	     (size & -size) != size ||
	     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-	     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+	     "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
	     i915_gem_obj_ggtt_offset(obj), size);

	pitch_val = obj->stride / 128;

[File diff suppressed because it is too large]

@@ -39,6 +39,8 @@ struct drm_i915_file_private;
 typedef uint32_t gen6_pte_t;
 typedef uint64_t gen8_pte_t;
 typedef uint64_t gen8_pde_t;
+typedef uint64_t gen8_ppgtt_pdpe_t;
+typedef uint64_t gen8_ppgtt_pml4e_t;

 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

@@ -88,9 +90,18 @@ typedef uint64_t gen8_pde_t;
  * PDPE  |  PDE  |  PTE  | offset
  * The difference as compared to normal x86 3 level page table is the PDPEs are
  * programmed via register.
+ *
+ * GEN8 48b legacy style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
+ * PML4E | PDPE  |  PDE  |  PTE  | offset
  */
+#define GEN8_PML4ES_PER_PML4		512
+#define GEN8_PML4E_SHIFT		39
+#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
 #define GEN8_PDPE_SHIFT			30
-#define GEN8_PDPE_MASK			0x3
+/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
+ * tables */
+#define GEN8_PDPE_MASK			0x1ff
 #define GEN8_PDE_SHIFT			21
 #define GEN8_PDE_MASK			0x1ff
 #define GEN8_PTE_SHIFT			12
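As a worked example of the new 4-level layout (an illustration added here, not from the patch), the shifts and masks above decompose a canonical 48-bit GPU virtual address like this:

#include <stdint.h>
#include <stdio.h>

#define GEN8_PML4E_SHIFT 39
#define GEN8_PML4E_MASK  0x1ff	/* GEN8_PML4ES_PER_PML4 - 1 */
#define GEN8_PDPE_SHIFT  30
#define GEN8_PDPE_MASK   0x1ff
#define GEN8_PDE_SHIFT   21
#define GEN8_PDE_MASK    0x1ff
#define GEN8_PTE_SHIFT   12

int main(void)
{
	/* bit fields: 47:39 | 38:30 | 29:21 | 20:12 | 11:0 */
	uint64_t addr = 0x0000123456789abcULL;

	printf("pml4e=%u pdpe=%u pde=%u pte=%u offset=0x%x\n",
	       (unsigned)((addr >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK),
	       (unsigned)((addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK),
	       (unsigned)((addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK),
	       (unsigned)((addr >> GEN8_PTE_SHIFT) & 0x1ff),	/* GEN8_PTES - 1 */
	       (unsigned)(addr & 0xfff));
	return 0;
}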
@@ -98,6 +109,9 @@ typedef uint64_t gen8_pde_t;
 #define GEN8_LEGACY_PDPES		4
 #define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))

+#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+
 #define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
 #define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
 #define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
@@ -135,7 +149,7 @@ struct i915_ggtt_view {

	union {
		struct {
-			unsigned long offset;
+			u64 offset;
			unsigned int size;
		} partial;
	} params;
@@ -241,9 +255,17 @@ struct i915_page_directory {
 };

 struct i915_page_directory_pointer {
-	/* struct page *page; */
-	DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
-	struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES];
+	struct i915_page_dma base;
+
+	unsigned long *used_pdpes;
+	struct i915_page_directory **page_directory;
+};
+
+struct i915_pml4 {
+	struct i915_page_dma base;
+
+	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
+	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };

 struct i915_address_space {
@@ -256,6 +278,7 @@ struct i915_address_space {
	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
+	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
@@ -341,8 +364,9 @@ struct i915_hw_ppgtt {
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
-		struct i915_page_directory_pointer pdp;
-		struct i915_page_directory pd;
+		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
+		struct i915_page_directory_pointer pdp;	/* GEN8+ */
+		struct i915_page_directory pd;		/* GEN6-7 */
	};

	struct drm_i915_file_private *file_priv;
@@ -436,24 +460,23 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
	     temp = min(temp, length), \
	     start += temp, length -= temp)

-#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
-	for (iter = gen8_pdpe_index(start); \
-	     pd = (pdp)->page_directory[iter], length > 0 && iter < GEN8_LEGACY_PDPES; \
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
+	for (iter = gen8_pdpe_index(start); \
+	     pd = (pdp)->page_directory[iter], \
+	     length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)

-/* Clamp length to the next page_directory boundary */
-static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
-{
-	uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);
-
-	if (next_pd > (start + length))
-		return length;
-
-	return next_pd - start;
-}
+#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \
+	for (iter = gen8_pml4e_index(start); \
+	     pdp = (pml4)->pdps[iter], \
+	     length > 0 && iter < GEN8_PML4ES_PER_PML4; \
+	     iter++, \
+	     temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \
+	     temp = min(temp, length), \
+	     start += temp, length -= temp)

 static inline uint32_t gen8_pte_index(uint64_t address)
 {
@@ -472,8 +495,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address)

 static inline uint32_t gen8_pml4e_index(uint64_t address)
 {
-	WARN_ON(1); /* For 64B */
-	return 0;
+	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
 }

 static inline size_t gen8_pte_count(uint64_t address, uint64_t length)

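A hypothetical usage sketch for the reworked iterator (not from the patch). One subtlety worth calling out: gen8_for_each_pdpe() now expands I915_PDPES_PER_PDP(dev), so it silently requires a variable named dev to be in scope at every call site:

static void walk_pdp(struct drm_device *dev,
		     struct i915_page_directory_pointer *pdp,
		     uint64_t start, uint64_t length)
{
	struct i915_page_directory *pd;
	uint64_t temp;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
		/* pd covers 'temp' bytes of [start, start + length) here;
		 * it may be NULL for not-yet-allocated entries. */
	}
}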
@@ -813,7 +813,6 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 int
 i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
@@ -826,9 +825,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

-	if (args->user_size > dev_priv->gtt.base.total)
-		return -E2BIG;
-
	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

@@ -30,11 +30,6 @@
 #include <generated/utsrelease.h>
 #include "i915_drv.h"

-static const char *yesno(int v)
-{
-	return v ? "yes" : "no";
-}
-
 static const char *ring_str(int ring)
 {
	switch (ring) {
@@ -197,8 +192,9 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
	err_printf(m, "  %s [%d]:\n", name, count);

	while (count--) {
-		err_printf(m, "  %08x %8u %02x %02x [ ",
-			   err->gtt_offset,
+		err_printf(m, "  %08x_%08x %8u %02x %02x [ ",
+			   upper_32_bits(err->gtt_offset),
+			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
@@ -427,15 +423,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
-			err_printf(m, " --- gtt_offset = 0x%08x\n",
-				   obj->gtt_offset);
+			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
+				   upper_32_bits(obj->gtt_offset),
+				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-				   dev_priv->ring[i].name, obj->gtt_offset);
+				   dev_priv->ring[i].name,
+				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

@@ -454,22 +452,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
+				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
-			err_printf(m, "%s --- HW Status = 0x%08x\n",
-				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
+			err_printf(m, "%s --- HW Status = 0x%08llx\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset + LRC_PPHWSP_PN * PAGE_SIZE);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
-					   obj->pages[0][elt],
-					   obj->pages[0][elt+1],
-					   obj->pages[0][elt+2],
-					   obj->pages[0][elt+3]);
+					   obj->pages[LRC_PPHWSP_PN][elt],
+					   obj->pages[LRC_PPHWSP_PN][elt+1],
+					   obj->pages[LRC_PPHWSP_PN][elt+2],
+					   obj->pages[LRC_PPHWSP_PN][elt+3]);
				offset += 16;
			}
		}
@@ -477,13 +475,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
-				   obj->gtt_offset);
+				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
-		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+		err_printf(m, "Semaphore page = 0x%08x\n",
+			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
@@ -591,7 +590,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
	int num_pages;
	bool use_ggtt;
	int i = 0;
-	u32 reloc_offset;
+	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

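upper_32_bits() and lower_32_bits() are the stock helpers from <linux/kernel.h>; splitting the now-64-bit gtt_offset this way keeps each printed half a fixed eight hex digits. For instance (illustrative values only):

/*
 *	u64 off = 0x0000000123456000ULL;
 *	err_printf(m, " %08x_%08x ", upper_32_bits(off), lower_32_bits(off));
 *	-> " 00000001_23456000 "
 */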
@@ -38,10 +38,6 @@
 #define   GS_MIA_SHIFT			16
 #define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)

-#define GUC_WOPCM_SIZE			0xc050
-#define   GUC_WOPCM_SIZE_VALUE		(0x80 << 12)	/* 512KB */
-#define GUC_WOPCM_OFFSET		0x80000		/* 512KB */
-
 #define SOFT_SCRATCH(n)			(0xc180 + ((n) * 4))

 #define UOS_RSA_SCRATCH_0		0xc200
@@ -56,10 +52,18 @@
 #define   UOS_MOVE			(1<<4)
 #define   START_DMA			(1<<0)
 #define DMA_GUC_WOPCM_OFFSET		0xc340
+#define   GUC_WOPCM_OFFSET_VALUE	0x80000		/* 512KB */

+#define GUC_WOPCM_SIZE			0xc050
+#define   GUC_WOPCM_SIZE_VALUE		(0x80 << 12)	/* 512KB */
+
+/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
+#define GUC_WOPCM_TOP			(GUC_WOPCM_SIZE_VALUE)
+
 #define GEN8_GT_PM_CONFIG		0x138140
+#define GEN9LP_GT_PM_CONFIG		0x138140
 #define GEN9_GT_PM_CONFIG		0x13816c
-#define   GEN8_GT_DOORBELL_ENABLE	(1<<0)
+#define   GT_DOORBELL_ENABLE		(1<<0)

 #define GEN8_GTCR			0x4274
 #define   GEN8_GTCR_INVALIDATE		(1<<0)
@@ -80,7 +84,8 @@
	 GUC_ENABLE_READ_CACHE_LOGIC |		\
	 GUC_ENABLE_MIA_CACHING |		\
	 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |	\
-	 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+	 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |	\
+	 GUC_ENABLE_MIA_CLOCK_GATING)

 #define HOST2GUC_INTERRUPT		0xc4c8
 #define   HOST2GUC_TRIGGER		(1<<0)

@ -0,0 +1,916 @@
|
|||
/*
|
||||
* Copyright © 2014 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/circ_buf.h>
|
||||
#include "i915_drv.h"
|
||||
#include "intel_guc.h"
|
||||
|
||||
/**
|
||||
* DOC: GuC Client
|
||||
*
|
||||
* i915_guc_client:
|
||||
* We use the term client to avoid confusion with contexts. A i915_guc_client is
|
||||
* equivalent to GuC object guc_context_desc. This context descriptor is
|
||||
* allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
|
||||
* and workqueue for it. Also the process descriptor (guc_process_desc), which
|
||||
* is mapped to client space. So the client can write Work Item then ring the
|
||||
* doorbell.
|
||||
*
|
||||
* To simplify the implementation, we allocate one gem object that contains all
|
||||
* pages for doorbell, process descriptor and workqueue.
|
||||
*
|
||||
* The Scratch registers:
|
||||
* There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
|
||||
* a value to the action register (SOFT_SCRATCH_0) along with any data. It then
|
||||
* triggers an interrupt on the GuC via another register write (0xC4C8).
|
||||
* Firmware writes a success/fail code back to the action register after
|
||||
* processes the request. The kernel driver polls waiting for this update and
|
||||
* then proceeds.
|
||||
* See host2guc_action()
|
||||
*
|
||||
* Doorbells:
|
||||
* Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
|
||||
* mapped into process space.
|
||||
*
|
||||
* Work Items:
|
||||
* There are several types of work items that the host may place into a
|
||||
* workqueue, each with its own requirements and limitations. Currently only
|
||||
* WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
|
||||
* represents in-order queue. The kernel driver packs ring tail pointer and an
|
||||
* ELSP context descriptor dword into Work Item.
|
||||
* See guc_add_workqueue_item()
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Read GuC command/status register (SOFT_SCRATCH_0)
|
||||
* Return true if it contains a response rather than a command
|
||||
*/
|
||||
static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
|
||||
u32 *status)
|
||||
{
|
||||
u32 val = I915_READ(SOFT_SCRATCH(0));
|
||||
*status = val;
|
||||
return GUC2HOST_IS_RESPONSE(val);
|
||||
}
|
||||
|
||||
static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
u32 status;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(len < 1 || len > 15))
|
||||
return -EINVAL;
|
||||
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
spin_lock(&dev_priv->guc.host2guc_lock);
|
||||
|
||||
dev_priv->guc.action_count += 1;
|
||||
dev_priv->guc.action_cmd = data[0];
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
I915_WRITE(SOFT_SCRATCH(i), data[i]);
|
||||
|
||||
POSTING_READ(SOFT_SCRATCH(i - 1));
|
||||
|
||||
I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
|
||||
|
||||
/* No HOST2GUC command should take longer than 10ms */
|
||||
ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
|
||||
if (status != GUC2HOST_STATUS_SUCCESS) {
|
||||
/*
|
||||
* Either the GuC explicitly returned an error (which
|
||||
* we convert to -EIO here) or no response at all was
|
||||
* received within the timeout limit (-ETIMEDOUT)
|
||||
*/
|
||||
if (ret != -ETIMEDOUT)
|
||||
ret = -EIO;
|
||||
|
||||
DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
|
||||
"status=0x%08X response=0x%08X\n",
|
||||
data[0], ret, status,
|
||||
I915_READ(SOFT_SCRATCH(15)));
|
||||
|
||||
dev_priv->guc.action_fail += 1;
|
||||
dev_priv->guc.action_err = ret;
|
||||
}
|
||||
dev_priv->guc.action_status = status;
|
||||
|
||||
spin_unlock(&dev_priv->guc.host2guc_lock);
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell the GuC to allocate or deallocate a specific doorbell
|
||||
*/
|
||||
|
||||
static int host2guc_allocate_doorbell(struct intel_guc *guc,
|
||||
struct i915_guc_client *client)
|
||||
{
|
||||
u32 data[2];
|
||||
|
||||
data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
|
||||
data[1] = client->ctx_index;
|
||||
|
||||
return host2guc_action(guc, data, 2);
|
||||
}
|
||||
|
||||
static int host2guc_release_doorbell(struct intel_guc *guc,
|
||||
struct i915_guc_client *client)
|
||||
{
|
||||
u32 data[2];
|
||||
|
||||
data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
|
||||
data[1] = client->ctx_index;
|
||||
|
||||
return host2guc_action(guc, data, 2);
|
||||
}
|
||||
|
||||
static int host2guc_sample_forcewake(struct intel_guc *guc,
|
||||
struct i915_guc_client *client)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
u32 data[2];
|
||||
|
||||
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
|
||||
data[1] = (intel_enable_rc6(dev_priv->dev)) ? 1 : 0;
|
||||
|
||||
return host2guc_action(guc, data, 2);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise, update, or clear doorbell data shared with the GuC
|
||||
*
|
||||
* These functions modify shared data and so need access to the mapped
|
||||
* client object which contains the page being used for the doorbell
|
||||
*/
|
||||
|
||||
static void guc_init_doorbell(struct intel_guc *guc,
|
||||
struct i915_guc_client *client)
|
||||
{
|
||||
struct guc_doorbell_info *doorbell;
|
||||
void *base;
|
||||
|
||||
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
|
||||
doorbell = base + client->doorbell_offset;
|
||||
|
||||
doorbell->db_status = 1;
|
||||
doorbell->cookie = 0;
|
||||
|
||||
kunmap_atomic(base);
|
||||
}
|
||||
|
||||
static int guc_ring_doorbell(struct i915_guc_client *gc)
|
||||
{
|
||||
struct guc_process_desc *desc;
|
||||
union guc_doorbell_qw db_cmp, db_exc, db_ret;
|
||||
union guc_doorbell_qw *db;
|
||||
void *base;
|
||||
int attempt = 2, ret = -EAGAIN;
|
||||
|
||||
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
|
||||
desc = base + gc->proc_desc_offset;
|
||||
|
||||
/* Update the tail so it is visible to GuC */
|
||||
desc->tail = gc->wq_tail;
|
||||
|
||||
/* current cookie */
|
||||
db_cmp.db_status = GUC_DOORBELL_ENABLED;
|
||||
db_cmp.cookie = gc->cookie;
|
||||
|
||||
/* cookie to be updated */
|
||||
db_exc.db_status = GUC_DOORBELL_ENABLED;
|
||||
db_exc.cookie = gc->cookie + 1;
|
||||
if (db_exc.cookie == 0)
|
||||
db_exc.cookie = 1;
|
||||
|
||||
/* pointer of current doorbell cacheline */
|
||||
db = base + gc->doorbell_offset;
	while (attempt--) {
		/* let's ring the doorbell */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
			db_cmp.value_qw, db_exc.value_qw);

		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			gc->cookie = db_exc.cookie;
			ret = 0;
			break;
		}

		/* XXX: doorbell was lost and need to acquire it again */
		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
			break;

		DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
			  db_cmp.cookie, db_ret.cookie);

		/* update the cookie to the newly read cookie from GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

	kunmap_atomic(base);
	return ret;
}

static void guc_disable_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct guc_doorbell_info *doorbell;
	void *base;
	int drbreg = GEN8_DRBREGL(client->doorbell_id);
	int value;

	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
	doorbell = base + client->doorbell_offset;

	doorbell->db_status = 0;

	kunmap_atomic(base);

	I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);

	value = I915_READ(drbreg);
	WARN_ON((value & GEN8_DRB_VALID) != 0);

	I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
	I915_WRITE(drbreg, 0);

	/* XXX: wait for any interrupts */
	/* XXX: wait for workqueue to drain */
}

/*
 * Select, assign and release doorbell cachelines
 *
 * These functions track which doorbell cachelines are in use.
 * The data they manipulate is protected by the host2guc lock.
 */

static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
{
	const uint32_t cacheline_size = cache_line_size();
	uint32_t offset;

	spin_lock(&guc->host2guc_lock);

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cacheline_size;

	spin_unlock(&guc->host2guc_lock);

	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cacheline_size);

	return offset;
}

static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
{
	/*
	 * The bitmap is split into two halves; the first half is used for
	 * normal priority contexts, the second half for high-priority ones.
	 * Note that logically higher priorities are numerically less than
	 * normal ones, so the test below means "is it high-priority?"
	 */
	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
	const uint16_t half = GUC_MAX_DOORBELLS / 2;
	const uint16_t start = hi_pri ? half : 0;
	const uint16_t end = start + half;
	uint16_t id;

	spin_lock(&guc->host2guc_lock);
	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
	if (id == end)
		id = GUC_INVALID_DOORBELL_ID;
	else
		bitmap_set(guc->doorbell_bitmap, id, 1);
	spin_unlock(&guc->host2guc_lock);

	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
			 hi_pri ? "high" : "normal", id);

	return id;
}

static void release_doorbell(struct intel_guc *guc, uint16_t id)
{
	spin_lock(&guc->host2guc_lock);
	bitmap_clear(guc->doorbell_bitmap, id, 1);
	spin_unlock(&guc->host2guc_lock);
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_init_proc_desc(struct intel_guc *guc,
			       struct i915_guc_client *client)
{
	struct guc_process_desc *desc;
	void *base;

	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
	desc = base + client->proc_desc_offset;

	memset(desc, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->context_id = client->ctx_index;
	desc->wq_size_bytes = client->wq_size;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;

	kunmap_atomic(base);
}

/*
 * Initialise/clear the context descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */

static void guc_init_ctx_desc(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct intel_context *ctx = client->owner;
	struct guc_context_desc desc;
	struct sg_table *sg;
	int i;

	memset(&desc, 0, sizeof(desc));

	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
	desc.context_id = client->ctx_index;
	desc.priority = client->priority;
	desc.db_id = client->doorbell_id;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct guc_execlist_context *lrc = &desc.lrc[i];
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
		struct intel_engine_cs *ring;
		struct drm_i915_gem_object *obj;
		uint64_t ctx_desc;

		/* TODO: We have a design issue to be solved here. Only when
		 * we receive the first batch do we know which engine the user
		 * is using, but here the GuC expects the lrc and ring to be
		 * pinned already. That is not a problem for the default
		 * context, which is the only GuC client owner for now; any
		 * future owner of a GuC client must make sure the lrc is
		 * pinned before getting here.
		 */
		obj = ctx->engine[i].state;
		if (!obj)
			break;	/* XXX: continue? */

		ring = ringbuf->ring;
		ctx_desc = intel_lr_context_descriptor(ctx, ring);
		lrc->context_desc = (u32)ctx_desc;

		/* The state page is after PPHWSP */
		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
				LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				(ring->id << GUC_ELC_ENGINE_OFFSET);

		obj = ringbuf->obj;

		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc.engines_used |= (1 << ring->id);
	}

	WARN_ON(desc.engines_used == 0);

	/*
	 * The CPU address is only needed at certain points, so kmap_atomic on
	 * demand instead of storing it in the ctx descriptor.
	 * XXX: May make debug easier to have it mapped
	 */
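	/*
	 * Note: three views of the same doorbell cacheline follow here:
	 * db_trigger_cpu is left 0 for a kernel client (kmap on demand),
	 * db_trigger_uk looks to be the GuC's own (GGTT) view of the page,
	 * and db_trigger_phy is the DMA address of the backing page.
	 */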
	desc.db_trigger_cpu = 0;
	desc.db_trigger_uk = client->doorbell_offset +
		i915_gem_obj_ggtt_offset(client->client_obj);
	desc.db_trigger_phy = client->doorbell_offset +
		sg_dma_address(client->client_obj->pages->sgl);

	desc.process_desc = client->proc_desc_offset +
		i915_gem_obj_ggtt_offset(client->client_obj);

	desc.wq_addr = client->wq_offset +
		i915_gem_obj_ggtt_offset(client->client_obj);

	desc.wq_size = client->wq_size;

	/*
	 * XXX: Take LRCs from an existing intel_context if this is not an
	 * IsKMDCreatedContext client
	 */
	desc.desc_private = (uintptr_t)client;

	/* Pool context is pinned already */
	sg = guc->ctx_pool_obj->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

static void guc_fini_ctx_desc(struct intel_guc *guc,
			      struct i915_guc_client *client)
{
	struct guc_context_desc desc;
	struct sg_table *sg;

	memset(&desc, 0, sizeof(desc));

	sg = guc->ctx_pool_obj->pages;
	sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
			     sizeof(desc) * client->ctx_index);
}

/* Reserve space for a work-queue item and hand back its byte offset */
static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
{
	struct guc_process_desc *desc;
	void *base;
	u32 size = sizeof(struct guc_wq_item);
	int ret = 0, timeout_counter = 200;

	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
	desc = base + gc->proc_desc_offset;

	while (timeout_counter-- > 0) {
		ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
				gc->wq_size) >= size, 1);

		if (!ret) {
			*offset = gc->wq_tail;

			/* advance the tail for the next work-queue item */
			gc->wq_tail += size;
			gc->wq_tail &= gc->wq_size - 1;
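			/* the wrap-around mask relies on the work queue
			 * size (GUC_WQ_SIZE, two pages) being a power of 2 */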

			/* this will break the loop */
			timeout_counter = 0;
		}
	}

	kunmap_atomic(base);

	return ret;
}

static int guc_add_workqueue_item(struct i915_guc_client *gc,
				  struct drm_i915_gem_request *rq)
{
	enum intel_ring_id ring_id = rq->ring->id;
	struct guc_wq_item *wqi;
	void *base;
	u32 tail, wq_len, wq_off = 0;
	int ret;

	ret = guc_get_workqueue_space(gc, &wq_off);
	if (ret)
		return ret;

	/* For now a workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi structure should never straddle a page boundary
	 * nor wrap to the beginning. This simplifies the implementation
	 * below.
	 *
	 * XXX: if that ever changes, we would need to stage the data in a
	 * temporary wqi and copy it into the workqueue buffer dw by dw.
	 */
	WARN_ON(sizeof(struct guc_wq_item) != 16);
	WARN_ON(wq_off & 3);

	/* wq starts from the page after doorbell / process_desc */
	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
			(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
	wq_off &= PAGE_SIZE - 1;
	wqi = (struct guc_wq_item *)((char *)base + wq_off);

	/* len does not include the header */
	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
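	/* the first DW packs the item type, length, target engine and
	 * write-cache-flush policy */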
	wqi->header = WQ_TYPE_INORDER |
			(wq_len << WQ_LEN_SHIFT) |
			(ring_id << WQ_TARGET_SHIFT) |
			WQ_NO_WCFLUSH_WAIT;

	/* The GuC wants only the low-order word of the context descriptor */
	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);

	/* The GuC firmware wants the tail index in QWords, not bytes */
	tail = rq->ringbuf->tail >> 3;
	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = 0; /* XXX: what fence should go here? */

	kunmap_atomic(base);

	return 0;
}

#define CTX_RING_BUFFER_START 0x08

/* Update the ringbuffer pointer in a saved context image */
static void lr_context_update(struct drm_i915_gem_request *rq)
{
	enum intel_ring_id ring_id = rq->ring->id;
	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
	struct page *page;
	uint32_t *reg_state;

	BUG_ON(!ctx_obj);
	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	reg_state = kmap_atomic(page);

	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);

	kunmap_atomic(reg_state);
}

/**
 * i915_guc_submit() - Submit commands through GuC
 * @client: the guc client where commands will go through
 * @rq: the request to submit; this identifies the LRC the commands come
 *	from and the HW engine that will execute them
 *
 * Return: 0 if successful
 */
int i915_guc_submit(struct i915_guc_client *client,
		    struct drm_i915_gem_request *rq)
{
	struct intel_guc *guc = client->guc;
	enum intel_ring_id ring_id = rq->ring->id;
	unsigned long flags;
	int q_ret, b_ret;

	/* Need this because of the deferred pin ctx and ring */
	/* Shall we move this right after the ring is pinned? */
	lr_context_update(rq);

	spin_lock_irqsave(&client->wq_lock, flags);

	q_ret = guc_add_workqueue_item(client, rq);
	if (q_ret == 0)
		b_ret = guc_ring_doorbell(client);

	client->submissions[ring_id] += 1;
	if (q_ret) {
		client->q_fail += 1;
		client->retcode = q_ret;
	} else if (b_ret) {
		client->b_fail += 1;
		client->retcode = q_ret = b_ret;
	} else {
		client->retcode = 0;
	}
	spin_unlock_irqrestore(&client->wq_lock, flags);

	spin_lock(&guc->host2guc_lock);
	guc->submissions[ring_id] += 1;
	guc->last_seqno[ring_id] = rq->seqno;
	spin_unlock(&guc->host2guc_lock);

	return q_ret;
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of i915_guc_submit() above.
 */

/**
 * gem_allocate_guc_obj() - Allocate gem object for GuC usage
 * @dev: drm device
 * @size: size of object
 *
 * This is a wrapper to create a gem obj. In order to use it inside GuC, the
 * object needs to stay pinned for its whole lifetime. It must also be pinned
 * to gtt space outside [0, GUC_WOPCM_TOP) because that range is reserved
 * inside GuC.
 *
 * Return: A drm_i915_gem_object if successful, otherwise NULL.
 */
static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
							u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, size);
	if (!obj)
		return NULL;

	if (i915_gem_object_get_pages(obj)) {
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}
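
	/* PIN_OFFSET_BIAS keeps the pinned object at or above GUC_WOPCM_TOP */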
	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return obj;
}

/**
 * gem_release_guc_obj() - Release gem object allocated for GuC usage
 * @obj: gem obj to be released
 */
static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
{
	if (!obj)
		return;

	if (i915_gem_obj_is_pinned(obj))
		i915_gem_object_ggtt_unpin(obj);

	drm_gem_object_unreference(&obj->base);
}

static void guc_client_free(struct drm_device *dev,
			    struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;

	if (!client)
		return;

	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
		/*
		 * First disable the doorbell, then tell the GuC we've
		 * finished with it, finally deallocate it in our bitmap
		 */
		guc_disable_doorbell(guc, client);
		host2guc_release_doorbell(guc, client);
		release_doorbell(guc, client->doorbell_id);
	}

	/*
	 * XXX: wait for any outstanding submissions before freeing memory.
	 * Be sure to drop any locks
	 */

	gem_release_guc_obj(client->client_obj);

	if (client->ctx_index != GUC_INVALID_CTX_ID) {
		guc_fini_ctx_desc(guc, client);
		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
	}

	kfree(client);
}

/**
 * guc_client_alloc() - Allocate an i915_guc_client
 * @dev: drm device
 * @priority: one of the four priority levels: _CRITICAL, _HIGH, _NORMAL
 *	and _LOW. The kernel client that replaces ExecList submission is
 *	created with NORMAL priority. A scheduler client can use HIGH,
 *	while a preemption context can use CRITICAL.
 * @ctx: the context that owns the client (we use the default render context)
 *
 * Return: An i915_guc_client object if successful, otherwise NULL.
 */
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
						uint32_t priority,
						struct intel_context *ctx)
{
	struct i915_guc_client *client;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct drm_i915_gem_object *obj;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->doorbell_id = GUC_INVALID_DOORBELL_ID;
	client->priority = priority;
	client->owner = ctx;
	client->guc = guc;

	client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
			GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
	if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
		client->ctx_index = GUC_INVALID_CTX_ID;
		goto err;
	}

	/* The first page is doorbell/proc_desc. The two following pages are wq. */
	obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (!obj)
		goto err;

	client->client_obj = obj;
	client->wq_offset = GUC_DB_SIZE;
	client->wq_size = GUC_WQ_SIZE;
	spin_lock_init(&client->wq_lock);

	client->doorbell_offset = select_doorbell_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	client->doorbell_id = assign_doorbell(guc, client->priority);
	if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
		/* XXX: evict a doorbell instead */
		goto err;

	guc_init_proc_desc(guc, client);
	guc_init_ctx_desc(guc, client);
	guc_init_doorbell(guc, client);

	/* XXX: Any cache flushes needed? General domain mgmt calls? */

	if (host2guc_allocate_doorbell(guc, client))
		goto err;

	DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
		priority, client, client->ctx_index, client->doorbell_id);

	return client;

err:
	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);

	guc_client_free(dev, client);
	return NULL;
}

static void guc_create_log(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	unsigned long offset;
	uint32_t size, flags;

	if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
		return;

	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;

	/* The first page is to save the log buffer state. Allocate one
	 * extra page for each of the others in case of overlap */
	size = (1 + GUC_LOG_DPC_PAGES + 1 +
		GUC_LOG_ISR_PAGES + 1 +
		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

	obj = guc->log_obj;
	if (!obj) {
		obj = gem_allocate_guc_obj(dev_priv->dev, size);
		if (!obj) {
			/* logging will be off */
			i915.guc_log_level = -1;
			return;
		}

		guc->log_obj = obj;
	}

	/* each allocated unit is a page */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}

/*
 * Set up the memory resources to be shared with the GuC. At this point,
 * we require just one object that can be mapped through the GGTT.
 */
int i915_guc_submission_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const size_t ctxsize = sizeof(struct guc_context_desc);
	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
	struct intel_guc *guc = &dev_priv->guc;

	if (!i915.enable_guc_submission)
		return 0; /* not enabled */

	if (guc->ctx_pool_obj)
		return 0; /* already allocated */

	guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
	if (!guc->ctx_pool_obj)
		return -ENOMEM;

	spin_lock_init(&dev_priv->guc.host2guc_lock);

	ida_init(&guc->ctx_ids);

	guc_create_log(guc);

	return 0;
}

int i915_guc_submission_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
	struct i915_guc_client *client;

	/* client for execbuf submission */
	client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
	if (!client) {
		DRM_ERROR("Failed to create execbuf guc_client\n");
		return -ENOMEM;
	}

	guc->execbuf_client = client;

	host2guc_sample_forcewake(guc, client);

	return 0;
}

void i915_guc_submission_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;

	guc_client_free(dev, guc->execbuf_client);
	guc->execbuf_client = NULL;
}

void i915_guc_submission_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;

	gem_release_guc_obj(dev_priv->guc.log_obj);
	guc->log_obj = NULL;

	if (guc->ctx_pool_obj)
		ida_destroy(&guc->ctx_ids);
	gem_release_guc_obj(guc->ctx_pool_obj);
	guc->ctx_pool_obj = NULL;
}

@ -45,6 +45,18 @@
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@ -62,6 +74,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
@ -97,6 +110,7 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
@ -153,35 +167,46 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   uint32_t interrupt_mask,
				   uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);
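	/* in IMR a set bit masks the interrupt; within interrupt_mask the
	 * enabled bits end up 0 (unmasked) and the rest end up 1 (masked) */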

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
	ilk_update_display_irq(dev_priv, mask, 0);
}

/**
@ -350,6 +375,38 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
	synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);
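	/* same IMR convention as above: enabled bits unmask, the rest mask */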

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
@ -1263,7 +1320,31 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
@ -1275,6 +1356,16 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
@ -1284,8 +1375,6 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val)
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
@ -1305,7 +1394,13 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
	}
}

/* Get a bit mask of pins that have triggered, and which ones may be long. */
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
@ -1314,9 +1409,6 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
	enum port port;
	int i;

	*pin_mask = 0;
	*long_mask = 0;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

@ -1557,7 +1649,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask, long_mask;
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

@ -1572,20 +1664,26 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_g4x,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_i915,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

@ -1679,23 +1777,30 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
	return ret;
}

static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask, long_mask;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_ibx,
				   pch_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@ -1786,38 +1891,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (HAS_PCH_SPT(dev))
		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
	else
		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask, long_mask;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		if (HAS_PCH_SPT(dev)) {
			intel_get_hpd_pins(&pin_mask, &long_mask,
					   hotplug_trigger,
					   dig_hotplug_reg, hpd_spt,
					   pch_port_hotplug_long_detect);

			/* detect PORTE HP event */
			dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
			if (pch_port_hotplug_long_detect(PORT_E,
							 dig_hotplug_reg))
				long_mask |= 1 << HPD_PORT_E;
		} else
			intel_get_hpd_pins(&pin_mask, &long_mask,
					   hotplug_trigger,
					   dig_hotplug_reg, hpd_cpt,
					   pch_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@ -1848,10 +1925,67 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
		cpt_serr_int_handler(dev);
}

static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;
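
	/* SPT reports port E hotplug through a second register
	 * (PCH_PORT_HOTPLUG2), so the triggers are split here and the
	 * results accumulated into the same pin masks */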
	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}

static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

@ -1901,6 +2035,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

@ -2013,27 +2151,19 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
	return ret;
}

static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hp_control, hp_trigger;
	u32 pin_mask, long_mask;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Get the status */
	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
	hp_control = I915_READ(BXT_HOTPLUG_CTL);
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	/* Hotplug not enabled ? */
	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
		DRM_ERROR("Interrupt when HPD disabled\n");
		return;
	}
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	/* Clear sticky bits in hpd status */
	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);

	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
			   hpd_bxt, bxt_port_hotplug_long_detect);
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

@ -2050,7 +2180,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (IS_GEN9(dev))
	if (INTEL_INFO(dev_priv)->gen >= 9)
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

@ -2083,6 +2213,12 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;
			u32 hotplug_trigger = 0;

			if (IS_BROXTON(dev_priv))
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
			else if (IS_BROADWELL(dev_priv))
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

@ -2092,8 +2228,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
				found = true;
			}

			if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
				bxt_hpd_handler(dev, tmp);
			if (hotplug_trigger) {
				if (IS_BROXTON(dev))
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
				else
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
				found = true;
			}

@ -2124,7 +2263,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
			if (INTEL_INFO(dev_priv)->gen >= 9)
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

@ -2142,7 +2281,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
							  pipe);


			if (IS_GEN9(dev))
			if (INTEL_INFO(dev_priv)->gen >= 9)
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

@ -2166,7 +2305,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
			cpt_irq_handler(dev, pch_iir);

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, pch_iir);
			else
				cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");

@ -3026,86 +3169,124 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
	vlv_display_irq_reset(dev_priv);
}

static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;
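
	/* collect the HPD trigger bits of every encoder whose pin currently
	 * has hotplug detection enabled */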
	for_each_intel_encoder(dev, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else if (HAS_PCH_SPT(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
				enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void spt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* enable SPT PORTE hot plug */
	if (HAS_PCH_SPT(dev)) {
		hotplug = I915_READ(PCH_PORT_HOTPLUG2);
		hotplug |= PORTE_HOTPLUG_ENABLE;
		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_INFO(dev)->gen >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev);
}

static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_port = 0;
	u32 hotplug_ctrl;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	/* Now, enable HPD */
	for_each_intel_encoder(dev, intel_encoder) {
		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
				== HPD_ENABLED)
			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
	}
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	/* Mask all HPD control bits */
	hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable requested port in hotplug control */
	/* TODO: implement (short) HPD support on port A */
	WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
	if (hotplug_port & BXT_DE_PORT_HP_DDIB)
		hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
	if (hotplug_port & BXT_DE_PORT_HP_DDIC)
		hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
	I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);

	/* Unmask DDI hotplug in IMR */
	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);

	/* Enable DDI hotplug in IER */
	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
	POSTING_READ(GEN8_DE_PORT_IER);
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)

@ -3173,15 +3354,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

@ -3377,24 +3560,31 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 de_port_en = GEN8_AUX_CHANNEL_A;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (IS_GEN9(dev_priv)) {
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_en |= BXT_DE_PORT_GMBUS;
	} else
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

@ -3406,7 +3596,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
				  dev_priv->de_irq_mask[pipe],
				  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}

static int gen8_irq_postinstall(struct drm_device *dev)

@ -3964,7 +4154,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

@ -3973,9 +4162,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	hotplug_en |= intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later. So just do it once.

@ -4187,10 +4374,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (HAS_PCH_SPLIT(dev))
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
		else
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;

@ -4198,7 +4387,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;

@ -51,6 +51,7 @@ struct i915_params i915 __read_mostly = {
	.use_mmio_flip = 0,
	.mmio_debug = 0,
	.verbose_state_checks = 1,
	.nuclear_pageflip = 0,
	.edp_vswing = 0,
	.enable_guc_submission = false,
	.guc_log_level = -1,

@ -177,6 +178,10 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
	"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");

module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
	"Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false).");

/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,

@ -352,8 +352,8 @@
 */
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
#define   MI_LRI_FORCE_POSTED		(1<<12)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8   MI_INSTR(0x24, 2)
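/* the *_REGISTER_MEM commands now encode a fixed DW count (one address DW,
 * two on gen8 for a 64-bit address) rather than scaling with a register
 * count parameter */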
|
||||
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
|
||||
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
|
||||
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
|
||||
|
@ -364,8 +364,8 @@
|
|||
#define MI_INVALIDATE_BSD (1<<7)
|
||||
#define MI_FLUSH_DW_USE_GTT (1<<2)
|
||||
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
|
||||
#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
|
||||
#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
|
||||
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
|
||||
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
|
||||
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
|
||||
#define MI_BATCH_NON_SECURE (1)
|
||||
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
|
||||
|
@ -1099,6 +1099,12 @@ enum skl_disp_power_wells {
|
|||
#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
|
||||
#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
|
||||
|
||||
#define _CHV_CMN_DW0_CH0 0x8100
|
||||
#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
|
||||
#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
|
||||
#define DPIO_ALLDL_POWERDOWN (1 << 1)
|
||||
#define DPIO_ANYDL_POWERDOWN (1 << 0)
|
||||
|
||||
#define _CHV_CMN_DW5_CH0 0x8114
|
||||
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
|
||||
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
|
||||
|
@ -1135,10 +1141,23 @@ enum skl_disp_power_wells {
|
|||
|
||||
#define _CHV_CMN_DW19_CH0 0x814c
|
||||
#define _CHV_CMN_DW6_CH1 0x8098
|
||||
#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
|
||||
#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
|
||||
#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
|
||||
#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
|
||||
|
||||
#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
|
||||
|
||||
#define CHV_CMN_DW28 0x8170
|
||||
#define DPIO_CL1POWERDOWNEN (1 << 23)
|
||||
#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
|
||||
#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
|
||||
#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
|
||||
#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
|
||||
#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
|
||||
|
||||
#define CHV_CMN_DW30 0x8178
|
||||
#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
|
||||
#define DPIO_LRC_BYPASS (1 << 3)
|
||||
|
||||
#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
|
||||
|
@ -1674,11 +1693,18 @@ enum skl_disp_power_wells {
|
|||
#define GFX_MODE_GEN7 0x0229c
|
||||
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
|
||||
#define GFX_RUN_LIST_ENABLE (1<<15)
|
||||
#define GFX_INTERRUPT_STEERING (1<<14)
|
||||
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
|
||||
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
|
||||
#define GFX_REPLAY_MODE (1<<11)
|
||||
#define GFX_PSMI_GRANULARITY (1<<10)
|
||||
#define GFX_PPGTT_ENABLE (1<<9)
|
||||
#define GEN8_GFX_PPGTT_48B (1<<7)
|
||||
|
||||
#define GFX_FORWARD_VBLANK_MASK (3<<5)
|
||||
#define GFX_FORWARD_VBLANK_NEVER (0<<5)
|
||||
#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
|
||||
#define GFX_FORWARD_VBLANK_COND (2<<5)
|
||||
|
||||
#define VLV_DISPLAY_BASE 0x180000
|
||||
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
|
||||
|
@@ -2185,16 +2211,20 @@ enum skl_disp_power_wells {
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))

/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -4107,6 +4137,7 @@ enum skl_disp_power_wells {
/* How many wires to use. I guess 3 was too hard */
#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK (7 << 19)
#define DP_PORT_WIDTH_SHIFT 19

/* Mystic DPCD version 1.1 special mode */
#define DP_ENHANCED_FRAMING (1 << 18)
@@ -4617,6 +4648,7 @@ enum skl_disp_power_wells {

#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
#define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)

/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -5363,15 +5395,17 @@ enum skl_disp_power_wells {

#define CPU_VGACNTRL 0x41000

#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
#define DIGITAL_PORTA_NO_DETECT (0 << 0)
#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)

/* refresh rate hardware control */
#define RR_HW_CTL 0x45300
@@ -5693,11 +5727,12 @@ enum skl_disp_power_wells {
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))

#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_VCS2_IRQ_SHIFT 16
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
#define GEN8_VCS2_IRQ_SHIFT 16
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16

#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
@@ -5763,21 +5798,6 @@ enum skl_disp_power_wells {
#define GEN8_PCU_IIR 0x444e8
#define GEN8_PCU_IER 0x444ec

/* BXT hotplug control */
#define BXT_HOTPLUG_CTL 0xC4030
#define BXT_DDIA_HPD_ENABLE (1 << 28)
#define BXT_DDIA_HPD_STATUS (3 << 24)
#define BXT_DDIC_HPD_ENABLE (1 << 12)
#define BXT_DDIC_HPD_STATUS (3 << 8)
#define BXT_DDIB_HPD_ENABLE (1 << 4)
#define BXT_DDIB_HPD_STATUS (3 << 0)
#define BXT_HOTPLUG_CTL_MASK (BXT_DDIA_HPD_ENABLE | \
BXT_DDIB_HPD_ENABLE | \
BXT_DDIC_HPD_ENABLE)
#define BXT_HPD_STATUS_MASK (BXT_DDIA_HPD_STATUS | \
BXT_DDIB_HPD_STATUS | \
BXT_DDIC_HPD_STATUS)

#define ILK_DISPLAY_CHICKEN2 0x42004
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -5950,6 +5970,7 @@ enum skl_disp_power_wells {
#define SDE_AUXB_CPT (1 << 25)
#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
@@ -5963,7 +5984,8 @@ enum skl_disp_power_wells {
#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
SDE_PORTB_HOTPLUG_CPT | \
SDE_PORTA_HOTPLUG_SPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_ERROR_CPT (1 << 16)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
@@ -5998,46 +6020,46 @@ enum skl_disp_power_wells {
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))

/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define BXT_PORTA_HOTPLUG_ENABLE (1 << 28)
#define BXT_PORTA_HOTPLUG_STATUS_MASK (0x3 << 24)
#define BXT_PORTA_HOTPLUG_NO_DETECT (0 << 24)
#define BXT_PORTA_HOTPLUG_SHORT_DETECT (1 << 24)
#define BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24)
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)

#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
#define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
@@ -6304,9 +6326,11 @@ enum skl_disp_power_wells {
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SPT_PWM_GRANULARITY (1<<0)
#define SOUTH_CHICKEN2 0xc2004
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
#define LPT_PWM_GRANULARITY (1<<5)
#define DPLS_EDP_PPS_FIX_DIS (1<<0)

#define _FDI_RXA_CHICKEN 0xc200c
@@ -6870,7 +6894,9 @@ enum skl_disp_power_wells {
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)

#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)

#define GEN8_GARBCNTL 0xB004
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
@@ -7159,6 +7185,8 @@ enum skl_disp_power_wells {
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1<<0)

/* DDI Buffer Translations */

@@ -186,33 +186,49 @@ DEFINE_EVENT(i915_va, i915_va_alloc,
TP_ARGS(vm, start, length, name)
);

DECLARE_EVENT_CLASS(i915_page_table_entry,
TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
TP_ARGS(vm, pde, start, pde_shift),
DECLARE_EVENT_CLASS(i915_px_entry,
TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
TP_ARGS(vm, px, start, px_shift),

TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, pde)
__field(u32, px)
__field(u64, start)
__field(u64, end)
),

TP_fast_assign(
__entry->vm = vm;
__entry->pde = pde;
__entry->px = px;
__entry->start = start;
__entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
__entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
),

TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
__entry->vm, __entry->pde, __entry->start, __entry->end)
__entry->vm, __entry->px, __entry->start, __entry->end)
);

DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
TP_ARGS(vm, pde, start, pde_shift)
);

DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
TP_ARGS(vm, pdpe, start, pdpe_shift),

TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);

DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
TP_ARGS(vm, pml4e, start, pml4e_shift),

TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);

/* Avoid extra math because we only support two sizes. The format is defined by
* bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \

@@ -40,6 +40,19 @@
#define INTEL_VGT_IF_VERSION \
INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)

/*
* notifications from guest to vgpu device model
*/
enum vgt_g2v_type {
VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
VGT_G2V_EXECLIST_CONTEXT_CREATE,
VGT_G2V_EXECLIST_CONTEXT_DESTROY,
VGT_G2V_MAX,
};

struct vgt_if {
uint64_t magic; /* VGT_MAGIC */
uint16_t version_major;
@@ -70,11 +83,28 @@ struct vgt_if {
uint32_t rsv3[0x200 - 24]; /* pad to half page */
/*
* The bottom half page is for response from Gfx driver to hypervisor.
* Set to reserved fields temporarily by now.
*/
uint32_t rsv4;
uint32_t display_ready; /* ready for display owner switch */
uint32_t rsv5[0x200 - 2]; /* pad to one page */

uint32_t rsv5[4];

uint32_t g2v_notify;
uint32_t rsv6[7];

uint32_t pdp0_lo;
uint32_t pdp0_hi;
uint32_t pdp1_lo;
uint32_t pdp1_hi;
uint32_t pdp2_lo;
uint32_t pdp2_hi;
uint32_t pdp3_lo;
uint32_t pdp3_hi;

uint32_t execlist_context_descriptor_lo;
uint32_t execlist_context_descriptor_hi;

uint32_t rsv7[0x200 - 24]; /* pad to one page */
} __packed;

#define vgtif_reg(x) \

@@ -85,22 +85,14 @@ intel_connector_atomic_get_property(struct drm_connector *connector,
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *crtc_state;

if (WARN_ON(!intel_crtc->config))
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
else
crtc_state = kmemdup(intel_crtc->config,
sizeof(*intel_crtc->config), GFP_KERNEL);

crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return NULL;

__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

crtc_state->base.crtc = crtc;

return &crtc_state->base;
}

@@ -149,9 +141,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
int i, j;

num_scalers_need = hweight32(scaler_state->scaler_users);
DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
crtc_state, num_scalers_need, intel_crtc->num_scalers,
scaler_state->scaler_users);

/*
* High level flow:

@@ -76,11 +76,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
struct drm_plane_state *state;
struct intel_plane_state *intel_state;

if (WARN_ON(!plane->state))
intel_state = intel_create_plane_state(plane);
else
intel_state = kmemdup(plane->state, sizeof(*intel_state),
GFP_KERNEL);
intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);

if (!intel_state)
return NULL;

@@ -1350,21 +1350,3 @@ intel_parse_bios(struct drm_device *dev)

return 0;
}

/* Ensure that vital registers have been initialised, even if the BIOS
* is absent or just failing to do its job.
*/
void intel_setup_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

/* Set the Panel Power On/Off timings if uninitialized. */
if (!HAS_PCH_SPLIT(dev) &&
I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);

/* Set T3 to 35ms and Tx to 200ms */
I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
}
}

@@ -588,7 +588,6 @@ struct bdb_psr {
struct psr_table psr_table[16];
} __packed;

void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);

/*

@@ -707,7 +707,6 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);

}

static struct intel_encoder *
@@ -1242,9 +1241,10 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
struct intel_encoder *intel_encoder)
{
int clock = crtc_state->port_clock;

if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct intel_shared_dpll *pll;
uint32_t val;
@@ -1523,11 +1523,11 @@ skip_remaining_dividers:
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
uint32_t ctrl1, cfgcr1, cfgcr2;
int clock = crtc_state->port_clock;

/*
* See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1615,14 +1615,14 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder,
int clock)
struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
struct bxt_clk_div clk_div = {0};
int vco = 0;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
uint32_t lanestagger;
int clock = crtc_state->port_clock;

if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock;
@@ -1750,17 +1750,16 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
int clock = crtc_state->port_clock;

if (IS_SKYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder, clock);
intel_encoder);
else if (IS_BROXTON(dev))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder, clock);
intel_encoder);
else
return hsw_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder, clock);
intel_encoder);
}

void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1893,7 +1892,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;

temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;

@@ -1902,7 +1901,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;

temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
@@ -2289,6 +2288,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

intel_dp_set_link_params(intel_dp, crtc->config);

intel_ddi_init_dp_buf_reg(intel_encoder);

intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -3069,6 +3070,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->has_dp_encoder = true;
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
@@ -3215,7 +3218,15 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
goto err;

intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
&& port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
}

/* In theory we don't need the encoder->type check, but leave it just in

@@ -72,6 +72,10 @@ static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};

/* Cursor formats */
@@ -135,6 +139,39 @@ intel_pch_rawclk(struct drm_device *dev)
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

/* hrawclock is 1/4 the FSB frequency */
int intel_hrawclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;

/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
if (IS_VALLEYVIEW(dev))
return 200;

clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100;
case CLKCFG_FSB_533:
return 133;
case CLKCFG_FSB_667:
return 166;
case CLKCFG_FSB_800:
return 200;
case CLKCFG_FSB_1067:
return 266;
case CLKCFG_FSB_1333:
return 333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
return 400;
default:
return 133;
}
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
@@ -1061,54 +1098,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
}
}

/*
* ibx_digital_port_connected - is the specified port connected?
* @dev_priv: i915 private structure
* @port: the port to test
*
* Returns true if @port is connected, false otherwise.
*/
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;

if (HAS_PCH_IBX(dev_priv->dev)) {
switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG;
break;
default:
return true;
}
} else {
switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG_CPT;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
case PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT;
break;
default:
return true;
}
}

return I915_READ(SDEISR) & bit;
}

static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
@@ -1585,26 +1574,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void intel_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (!IS_VALLEYVIEW(dev))
return;

/*
* IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
* CHV x1 PHY (DP/HDMI D)
* IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
*/
if (IS_CHERRYVIEW(dev)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
}

static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
@@ -1831,17 +1800,6 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}

mutex_unlock(&dev_priv->sb_lock);
}

@@ -2936,8 +2894,6 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
intel_crtc->base.base.id, intel_crtc->pipe, id);
}

/*
@@ -3179,24 +3135,20 @@ static void intel_complete_page_flips(struct drm_device *dev)

static void intel_update_primary_planes(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;

for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *plane = to_intel_plane(crtc->primary);
struct intel_plane_state *plane_state;

drm_modeset_lock(&crtc->mutex, NULL);
/*
* FIXME: Once we have proper support for primary planes (and
* disabling them without disabling the entire crtc) allow again
* a NULL crtc->primary->fb.
*/
if (intel_crtc->active && crtc->primary->fb)
dev_priv->display.update_primary_plane(crtc,
crtc->primary->fb,
crtc->x,
crtc->y);
drm_modeset_unlock(&crtc->mutex);
drm_modeset_lock_crtc(crtc, &plane->base);

plane_state = to_intel_plane_state(plane->base.state);

if (plane_state->base.fb)
plane->commit_plane(&plane->base, plane_state);

drm_modeset_unlock_crtc(crtc);
}
}

@@ -3240,6 +3192,9 @@ void intel_finish_reset(struct drm_device *dev)
* so update the base address of all primary
* planes to the the last fb to make sure we're
* showing the correct fb after a reset.
*
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
intel_update_primary_planes(dev);
return;
@@ -4963,12 +4918,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)

intel_ddi_enable_pipe_clock(intel_crtc);

if (INTEL_INFO(dev)->gen == 9)
if (INTEL_INFO(dev)->gen >= 9)
skylake_pfit_enable(intel_crtc);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_enable(intel_crtc);
else
MISSING_CASE(INTEL_INFO(dev)->gen);
ironlake_pfit_enable(intel_crtc);

/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5100,12 +5053,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)

intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

if (INTEL_INFO(dev)->gen == 9)
if (INTEL_INFO(dev)->gen >= 9)
skylake_scaler_disable(intel_crtc);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_disable(intel_crtc);
else
MISSING_CASE(INTEL_INFO(dev)->gen);
ironlake_pfit_disable(intel_crtc);

intel_ddi_disable_pipe_clock(intel_crtc);

@@ -5277,6 +5228,21 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
modeset_put_power_domains(dev_priv, put_domains[i]);
}

static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;

if (INTEL_INFO(dev_priv)->gen >= 9 ||
IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
else if (INTEL_INFO(dev_priv)->gen < 4)
return 2*max_cdclk_freq*90/100;
else
return max_cdclk_freq*90/100;
}

static void intel_update_max_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5316,8 +5282,13 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
}

dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
dev_priv->max_cdclk_freq);

DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
dev_priv->max_dotclk_freq);
}

static void intel_update_cdclk(struct drm_device *dev)
@@ -6035,13 +6006,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)

is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_prepare_pll(intel_crtc, intel_crtc->config);
else
vlv_prepare_pll(intel_crtc, intel_crtc->config);
}

if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);

@@ -6065,10 +6029,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
encoder->pre_pll_enable(encoder);

if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
else
} else {
vlv_prepare_pll(intel_crtc, intel_crtc->config);
vlv_enable_pll(intel_crtc, intel_crtc->config);
}
}

for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6196,6 +6163,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
i9xx_disable_pll(intel_crtc);
}

for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_pll_disable)
encoder->post_pll_disable(encoder);

if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

@@ -7377,8 +7348,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
1 << DPIO_CHV_N_DIV_SHIFT);

/* M2 fraction division */
if (bestm2_frac)
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

/* M2 fraction division enable */
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
@@ -8119,6 +8089,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
else
i9xx_crtc_clock_get(crtc, pipe_config);

/*
* Normally the dotclock is filled in by the encoder .get_config()
* but in case the pipe is enabled w/o any ports we need a sane
* default.
*/
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;

return true;
}

@@ -8380,8 +8358,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,

if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
with_fdi, "LP PCH doesn't have FDI\n"))
if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;

mutex_lock(&dev_priv->sb_lock);
@@ -8404,8 +8381,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
}

reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -8421,8 +8397,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)

mutex_lock(&dev_priv->sb_lock);

reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9434,7 +9409,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)

DRM_DEBUG_KMS("Enabling package C8+\n");

if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
if (HAS_PCH_LPT_LP(dev)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9454,7 +9429,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
hsw_restore_lcpll(dev_priv);
lpt_init_pch_refclk(dev);

if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
if (HAS_PCH_LPT_LP(dev)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9804,12 +9779,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
}

if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
if (INTEL_INFO(dev)->gen == 9)
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else if (INTEL_INFO(dev)->gen < 9)
ironlake_get_pfit_config(crtc, pipe_config);
else
MISSING_CASE(INTEL_INFO(dev)->gen);
ironlake_get_pfit_config(crtc, pipe_config);
}

if (IS_HASWELL(dev))
@@ -9943,8 +9916,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int x = crtc->cursor_x;
int y = crtc->cursor_y;
struct drm_plane_state *cursor_state = crtc->cursor->state;
int x = cursor_state->crtc_x;
int y = cursor_state->crtc_y;
u32 base = 0, pos = 0;

if (on)
@@ -9957,7 +9931,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
base = 0;

if (x < 0) {
if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
if (x + cursor_state->crtc_w <= 0)
base = 0;

pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -9966,7 +9940,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
pos |= x << CURSOR_X_SHIFT;

if (y < 0) {
if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
if (y + cursor_state->crtc_h <= 0)
base = 0;

pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -9982,8 +9956,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
base += (intel_crtc->base.cursor->state->crtc_h *
intel_crtc->base.cursor->state->crtc_w - 1) * 4;
base += (cursor_state->crtc_h *
cursor_state->crtc_w - 1) * 4;
}

if (IS_845G(dev) || IS_I865G(dev))
@@ -11034,10 +11008,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
@@ -11161,11 +11135,10 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 start_vbl_count;

intel_mark_page_flip_active(intel_crtc);

intel_pipe_update_start(intel_crtc, &start_vbl_count);
intel_pipe_update_start(intel_crtc);

if (INTEL_INFO(dev)->gen >= 9)
skl_do_mmio_flip(intel_crtc);
@@ -11173,7 +11146,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
/* use_mmio_flip() retricts MMIO flips to ilk+ */
ilk_do_mmio_flip(intel_crtc);

intel_pipe_update_end(intel_crtc, start_vbl_count);
intel_pipe_update_end(intel_crtc);
}

static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11237,6 +11210,9 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
return true;

if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
return false;

if (!work->enable_stall_check)
return false;

@@ -11627,7 +11603,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
intel_crtc->atomic.update_wm_pre = true;
}

if (visible)
if (visible || was_visible)
intel_crtc->atomic.fb_bits |=
to_intel_plane(plane)->frontbuffer_bit;

@@ -11900,14 +11876,16 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
pipe_config->has_dp_encoder,
pipe_config->lane_count,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);

DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
pipe_config->has_dp_encoder,
pipe_config->lane_count,
pipe_config->dp_m2_n2.gmch_m,
pipe_config->dp_m2_n2.gmch_n,
pipe_config->dp_m2_n2.link_m,
@@ -12119,10 +12097,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
* after encoders and crtc also have had their say. */
base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
pipe_config);
if (base_bpp < 0)
@@ -12191,7 +12165,7 @@ encoder_retry:
/* Dithering seems to not pass-through bits correctly when it should, so
* only enable it on 6bpc panels. */
pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
@@ -12414,6 +12388,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_M_N(fdi_m_n);

PIPE_CONF_CHECK_I(has_dp_encoder);
PIPE_CONF_CHECK_I(lane_count);

if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12464,16 +12439,16 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);

PIPE_CONF_CHECK_I(gmch_pfit.control);
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
PIPE_CONF_CHECK_I(pch_pfit.size);
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
}

PIPE_CONF_CHECK_I(scaler_state.scaler_id);
@@ -13451,7 +13426,9 @@ intel_commit_primary_plane(struct drm_plane *plane,
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);

dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
dev_priv->display.update_primary_plane(crtc, fb,
state->src.x1 >> 16,
state->src.y1 >> 16);
}

static void
@@ -13475,7 +13452,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,

/* Perform vblank evasion around commit operation */
if (crtc->state->active)
intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
intel_pipe_update_start(intel_crtc);

if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(intel_crtc);
@@ -13487,7 +13464,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

if (crtc->state->active)
intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
intel_pipe_update_end(intel_crtc);
}

/**
@@ -13656,10 +13633,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);

plane->fb = state->base.fb;
crtc->cursor_x = state->base.crtc_x;
crtc->cursor_y = state->base.crtc_y;

if (intel_crtc->cursor_bo == obj)
goto update;

@@ -14798,8 +14771,6 @@ void intel_modeset_init(struct drm_device *dev)
}
}

intel_init_dpio(dev);

intel_shared_dpll_init(dev);

/* Just disable it once at startup */
@@ -14881,13 +14852,22 @@ intel_check_plane_mapping(struct intel_crtc *crtc)
return true;
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;

for_each_encoder_on_crtc(dev, &crtc->base, encoder)
return true;

return false;
}

static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
u32 reg;
bool enable;

/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14931,16 +14911,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)

/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
enable = false;
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enable = true;
break;
}

if (!enable)
if (!intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);

if (crtc->active != crtc->base.state->active) {
struct intel_encoder *encoder;

/* This can happen either due to bugs in the get_hw_state
* functions or because of calls to intel_crtc_disable_noatomic,

File diff suppressed because it is too large

@@ -39,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_atomic_state *state;
int bpp, i;
int lane_count, slots, rate;
int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_connector *drm_connector;
struct intel_connector *connector, *found = NULL;
@@ -56,20 +56,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);

rate = intel_dp_max_link_rate(intel_dp);

if (intel_dp->num_sink_rates) {
intel_dp->link_bw = 0;
intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
} else {
intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
intel_dp->rate_select = 0;
}

intel_dp->lane_count = lane_count;
pipe_config->lane_count = lane_count;

pipe_config->pipe_bpp = 24;
pipe_config->port_clock = rate;
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);

state = pipe_config->base.state;

@@ -184,6 +175,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);

intel_dp_set_link_params(intel_dp, intel_crtc->config);

/* FIXME: add support for SKL */
if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port),
@@ -286,6 +279,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
break;
}
pipe_config->base.adjusted_mode.flags |= flags;

pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;

intel_dp_get_m_n(crtc, pipe_config);

intel_ddi_clock_get(&intel_dig_port->base, pipe_config);

@@ -142,6 +142,7 @@ struct intel_encoder {
void (*mode_set)(struct intel_encoder *intel_encoder);
void (*disable)(struct intel_encoder *);
void (*post_disable)(struct intel_encoder *);
void (*post_pll_disable)(struct intel_encoder *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@@ -423,6 +424,8 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;

uint8_t lane_count;

/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
@@ -561,6 +564,8 @@ struct intel_crtc {
int scanline_offset;

unsigned start_vbl_count;
ktime_t start_vbl_time;

struct intel_crtc_atomic_commit atomic;

/* scalers available on this crtc */
@@ -657,13 +662,14 @@ struct cxsr_latency {
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
uint32_t color_range;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
enum hdmi_picture_aspect aspect_ratio;
struct intel_connector *attached_connector;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
@@ -696,23 +702,29 @@ enum link_m_n_set {
M2_N2
};

struct sink_crc {
bool started;
u8 last_crc[6];
int last_count;
};

struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
int link_rate;
uint8_t lane_count;
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
bool limited_color_range;
bool color_range_auto;
uint8_t link_bw;
uint8_t rate_select;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
struct sink_crc sink_crc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
@@ -735,7 +747,6 @@ struct intel_dp {
enum pipe pps_pipe;
struct edp_power_seq pps_delays;

bool use_tps3;
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
@ -770,6 +781,7 @@ struct intel_digital_port {
|
|||
struct intel_dp dp;
|
||||
struct intel_hdmi hdmi;
|
||||
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
|
||||
bool release_cl2_override;
|
||||
};
|
||||
|
||||
struct intel_dp_mst_encoder {
|
||||
|
@ -779,7 +791,7 @@ struct intel_dp_mst_encoder {
|
|||
void *port; /* store this opaque as its illegal to dereference it */
|
||||
};

static inline int
static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
	switch (dport->port) {
@@ -793,7 +805,21 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
	}
}

static inline int
static inline enum dpio_phy
vlv_dport_to_phy(struct intel_digital_port *dport)
{
	switch (dport->port) {
	case PORT_B:
	case PORT_C:
		return DPIO_PHY0;
	case PORT_D:
		return DPIO_PHY1;
	default:
		BUG();
	}
}

static inline enum dpio_channel
vlv_pipe_to_channel(enum pipe pipe)
{
	switch (pipe) {
@@ -987,6 +1013,7 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
extern const struct drm_plane_funcs intel_plane_funcs;
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
int intel_hrawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -995,8 +1022,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
bool intel_connector_get_hw_state(struct intel_connector *connector);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port);
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder);
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -1153,6 +1178,8 @@ void assert_csr_loaded(struct drm_i915_private *dev_priv);
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			     struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
@@ -1337,6 +1364,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);

void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override);


/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
@@ -1382,9 +1415,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc,
			     uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
void intel_pipe_update_start(struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_crtc *crtc);
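
/*
 * The vblank-count bookkeeping now lives inside these helpers, so callers
 * bracket a plane update with just the crtc. A minimal sketch of the new
 * calling convention (assumed from the prototypes above, not part of the
 * patch):
 *
 *	intel_pipe_update_start(crtc);	// wait for a safe window before vblank
 *	... write plane registers ...
 *	intel_pipe_update_end(crtc);	// check the update landed in one frame
 */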

/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);

@@ -654,6 +654,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	DRM_DEBUG_KMS("\n");

@@ -667,6 +668,8 @@ intel_dsi_mode_valid(struct drm_connector *connector,
			return MODE_PANEL;
		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;
		if (fixed_mode->clock > max_dotclk)
			return MODE_CLOCK_HIGH;
	}

	return MODE_OK;

@@ -201,6 +201,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
	int target_clock = mode->clock;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

@@ -212,8 +214,13 @@ intel_dvo_mode_valid(struct drm_connector *connector,
			return MODE_PANEL;
		if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = intel_dvo->panel_fixed_mode->clock;
	}

	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}

@@ -263,7 +263,7 @@ static int intelfb_create(struct drm_fb_helper *helper,

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
		      fb->width, fb->height,
		      i915_gem_obj_ggtt_offset(obj), obj);

@@ -0,0 +1,122 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"

struct i915_guc_client {
	struct drm_i915_gem_object *client_obj;
	struct intel_context *owner;
	struct intel_guc *guc;
	uint32_t priority;
	uint32_t ctx_index;

	uint32_t proc_desc_offset;
	uint32_t doorbell_offset;
	uint32_t cookie;
	uint16_t doorbell_id;
	uint16_t padding;		/* Maintain alignment		*/

	uint32_t wq_offset;
	uint32_t wq_size;

	spinlock_t wq_lock;		/* Protects all data below	*/
	uint32_t wq_tail;

	/* GuC submission statistics & status */
	uint64_t submissions[I915_NUM_RINGS];
	uint32_t q_fail;
	uint32_t b_fail;
	int retcode;
};

enum intel_guc_fw_status {
	GUC_FIRMWARE_FAIL = -1,
	GUC_FIRMWARE_NONE = 0,
	GUC_FIRMWARE_PENDING,
	GUC_FIRMWARE_SUCCESS
};

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_guc_fw {
	struct drm_device		*guc_dev;
	const char			*guc_fw_path;
	size_t				guc_fw_size;
	struct drm_i915_gem_object	*guc_fw_obj;
	enum intel_guc_fw_status	guc_fw_fetch_status;
	enum intel_guc_fw_status	guc_fw_load_status;

	uint16_t			guc_fw_major_wanted;
	uint16_t			guc_fw_minor_wanted;
	uint16_t			guc_fw_major_found;
	uint16_t			guc_fw_minor_found;
};

struct intel_guc {
	struct intel_guc_fw guc_fw;

	uint32_t log_flags;
	struct drm_i915_gem_object *log_obj;

	struct drm_i915_gem_object *ctx_pool_obj;
	struct ida ctx_ids;

	struct i915_guc_client *execbuf_client;

	spinlock_t host2guc_lock;	/* Protects all data below	*/

	DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/

	/* Action status & statistics */
	uint64_t action_count;		/* Total commands issued	*/
	uint32_t action_cmd;		/* Last command word		*/
	uint32_t action_status;		/* Last return status		*/
	uint32_t action_fail;		/* Total number of failures	*/
	int32_t action_err;		/* Last error code		*/

	uint64_t submissions[I915_NUM_RINGS];
	uint32_t last_seqno[I915_NUM_RINGS];
};

/* intel_guc_loader.c */
extern void intel_guc_ucode_init(struct drm_device *dev);
extern int intel_guc_ucode_load(struct drm_device *dev);
extern void intel_guc_ucode_fini(struct drm_device *dev);
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
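
/*
 * A hedged sketch of the intended driver flow for the loader entry points
 * above, drawn from the kerneldoc in intel_guc_loader.c (error handling
 * elided; the call sites are as documented there):
 *
 *	intel_guc_ucode_init(dev);	// driver load: fetch fw into a GEM object
 *	err = intel_guc_ucode_load(dev);	// gem_init_hw(), and after GPU reset
 *	if (err)
 *		... fall back to execlist submission ...
 *	intel_guc_ucode_fini(dev);	// driver unload: release all resources
 */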

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_device *dev);
int i915_guc_submission_enable(struct drm_device *dev);
int i915_guc_submit(struct i915_guc_client *client,
		    struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);

#endif

@@ -32,17 +32,16 @@
 * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
 */

#define GFXCORE_FAMILY_GEN8		11
#define GFXCORE_FAMILY_GEN9		12
#define GFXCORE_FAMILY_FORCE_ULONG	0x7fffffff
#define GFXCORE_FAMILY_UNKNOWN		0x7fffffff

#define GUC_CTX_PRIORITY_CRITICAL	0
#define GUC_CTX_PRIORITY_KMD_HIGH	0
#define GUC_CTX_PRIORITY_HIGH		1
#define GUC_CTX_PRIORITY_NORMAL		2
#define GUC_CTX_PRIORITY_LOW		3
#define GUC_CTX_PRIORITY_KMD_NORMAL	2
#define GUC_CTX_PRIORITY_NORMAL		3

#define GUC_MAX_GPU_CONTEXTS		1024
#define GUC_INVALID_CTX_ID		(GUC_MAX_GPU_CONTEXTS + 1)
#define GUC_INVALID_CTX_ID		GUC_MAX_GPU_CONTEXTS

/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE		1

@@ -76,6 +75,7 @@
#define GUC_CTX_DESC_ATTR_RESET		(1 << 4)
#define GUC_CTX_DESC_ATTR_WQLOCKED	(1 << 5)
#define GUC_CTX_DESC_ATTR_PCH		(1 << 6)
#define GUC_CTX_DESC_ATTR_TERMINATED	(1 << 7)

/* The guc control data is 10 DWORDs */
#define GUC_CTL_CTXINFO			0

@@ -108,6 +108,7 @@
#define   GUC_CTL_DISABLE_SCHEDULER	(1 << 4)
#define   GUC_CTL_PREEMPTION_LOG	(1 << 5)
#define   GUC_CTL_ENABLE_SLPC		(1 << 7)
#define   GUC_CTL_RESET_ON_PREMPT_FAILURE	(1 << 8)
#define GUC_CTL_DEBUG			8
#define   GUC_LOG_VERBOSITY_SHIFT	0
#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)

@@ -117,8 +118,9 @@
/* Verbosity range-check limits, without the shift */
#define   GUC_LOG_VERBOSITY_MIN		0
#define   GUC_LOG_VERBOSITY_MAX		3
#define GUC_CTL_RSRVD			9

#define GUC_CTL_MAX_DWORDS		(GUC_CTL_DEBUG + 1)
#define GUC_CTL_MAX_DWORDS		(GUC_CTL_RSRVD + 1)

struct guc_doorbell_info {
	u32 db_status;

@@ -208,7 +210,9 @@ struct guc_context_desc {

	u32 engine_presence;

	u32 reserved0[1];
	u8 engine_suspended;

	u8 reserved0[3];
	u64 reserved1[1];

	u64 desc_private;

@@ -0,0 +1,606 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */
#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_guc.h"

/**
 * DOC: GuC
 *
 * intel_guc:
 * Top level structure of the GuC. It handles firmware loading and manages
 * the client pool and doorbells. intel_guc owns an i915_guc_client to
 * replace the legacy ExecList submission.
 *
 * Firmware versioning:
 * The firmware build process generates a version header file with the major
 * and minor version defined. The versions are built into the CSS header of
 * the firmware. The i915 kernel driver sets the minimum firmware version
 * required per platform. The firmware installation package will install
 * (symbolic link) the proper version of firmware.
 *
 * GuC address space:
 * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
 * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
 * 512K. In order to exclude the 0-512K address space from the GGTT, all gfx
 * objects used by the GuC are pinned with PIN_OFFSET_BIAS along with the size
 * of the WOPCM.
 *
 * Firmware log:
 * The firmware log is enabled by setting i915.guc_log_level to a non-negative
 * level. Log data is printed out by reading the debugfs file
 * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
 * firmware loading status and scratch register values.
 *
 */
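
/*
 * A minimal sketch of the address-space rule above (illustrative only; the
 * same flags are used for real in intel_lr_context_pin() later in this
 * series): keep GuC-visible objects out of [0, GUC_WOPCM_TOP) in the GGTT
 * by pinning with a lower bound at the top of the WOPCM:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, align,
 *				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 */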

#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
	switch (status) {
	case GUC_FIRMWARE_FAIL:
		return "FAIL";
	case GUC_FIRMWARE_NONE:
		return "NONE";
	case GUC_FIRMWARE_PENDING:
		return "PENDING";
	case GUC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	default:
		return "UNKNOWN!";
	}
};

static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* tell DE to send nothing to GuC */
	I915_WRITE(DE_GUCRMR, ~0);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}

static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* tell DE to send (all) flip_done to GuC */
	irqs = DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEA_SPR_FLIP_DONE |
	       DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEB_SPR_FLIP_DONE |
	       DERRMR_PIPEC_PRI_FLIP_DONE | DERRMR_PIPEC_SPR_FLIP_DONE;
	/* Unmasked bits will cause GuC response message to be sent */
	I915_WRITE(DE_GUCRMR, ~irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
}

static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID? field seems unused by fw */
	return 0;
}

static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	switch (INTEL_INFO(dev_priv)->gen) {
	case 9:
		return GFXCORE_FAMILY_GEN9;

	default:
		DRM_ERROR("GUC: unsupported core family\n");
		return GFXCORE_FAMILY_UNKNOWN;
	}
}

static void set_guc_init_params(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT is calculated by:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;

		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}
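
/*
 * Worked example for the CTXINFO word above (pool address illustrative,
 * shift names as defined in intel_guc_fwif.h): with the context pool object
 * at GGTT offset 0x100000 and 4K pages,
 *
 *	pgs       = 0x100000 >> PAGE_SHIFT      = 0x100 pages
 *	ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16   = 1024 / 16 = 64
 *
 * and both fields are packed into params[GUC_CTL_CTXINFO] by their shifts.
 */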

/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether
 * the value matches either of two values representing completion
 * of the GuC boot process.
 *
 * This is used for polling the GuC status in a wait_for_atomic()
 * loop below.
 */
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
				      u32 *status)
{
	u32 val = I915_READ(GUC_STATUS);
	*status = val;
	return ((val & GS_UKERNEL_MASK) == GS_UKERNEL_READY ||
		(val & GS_UKERNEL_MASK) == GS_UKERNEL_LAPIC_DONE);
}

/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * GuC Firmware layout:
 * +-------------------------------+  ----
 * |          CSS header           |  128B
 * | contains major/minor version  |
 * +-------------------------------+  ----
 * |             uCode             |
 * +-------------------------------+  ----
 * |         RSA signature         |  256B
 * +-------------------------------+  ----
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 *
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
 */

#define UOS_CSS_HEADER_OFFSET		0
#define UOS_VER_MINOR_OFFSET		0x44
#define UOS_VER_MAJOR_OFFSET		0x46
#define UOS_CSS_HEADER_SIZE		0x80
#define UOS_RSA_SIG_SIZE		0x100
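
/*
 * For orientation (derived from the layout and the sizes above): the
 * smallest acceptable blob is UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE =
 * 0x180 bytes, and for a blob of total size S the RSA signature occupies
 * the final 0x100 bytes, i.e.
 *
 *	ucode_size = S - UOS_RSA_SIG_SIZE;	// also the signature offset
 *
 * which is exactly the `offset` computed in guc_ucode_xfer_dma() below.
 */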

static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
	unsigned long offset;
	struct sg_table *sg = fw_obj->pages;
	u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
	int i, ret = 0;

	/* uCode size, also is where RSA signature starts */
	offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
	I915_WRITE(DMA_COPY_SIZE, ucode_size);

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
	for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
		I915_WRITE(UOS_RSA_SCRATCH_0 + i * sizeof(u32), rsa[i]);

	/* Set the source address for the new blob */
	offset = i915_gem_obj_ggtt_offset(fw_obj);
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Spin-wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}

/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_device *dev = dev_priv->dev;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
		return ret;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaC6DisallowByGfxPause */
	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_BROXTON(dev))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	set_guc_init_params(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin it
	 * now that DMA has completed, so it doesn't continue to take up space.
	 */
	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);

	return ret;
}

/**
 * intel_guc_ucode_load() - load GuC uCode into the device
 * @dev: drm device
 *
 * Called from gem_init_hw() during driver loading and also after a GPU reset.
 *
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_ucode_init(), so here we need only check that
 * it succeeded, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_guc_ucode_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	int err = 0;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);

	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
		return 0;

	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
		return -ENOEXEC;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	switch (guc_fw->guc_fw_fetch_status) {
	case GUC_FIRMWARE_FAIL:
		/* something went wrong :( */
		err = -EIO;
		goto fail;

	case GUC_FIRMWARE_NONE:
	case GUC_FIRMWARE_PENDING:
	default:
		/* "can't happen" */
		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
			guc_fw->guc_fw_path,
			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
			guc_fw->guc_fw_fetch_status);
		err = -ENXIO;
		goto fail;

	case GUC_FIRMWARE_SUCCESS:
		break;
	}

	err = i915_guc_submission_init(dev);
	if (err)
		goto fail;

	err = guc_ucode_xfer(dev_priv);
	if (err)
		goto fail;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	if (i915.enable_guc_submission) {
		err = i915_guc_submission_enable(dev);
		if (err)
			goto fail;
		direct_interrupts_to_guc(dev_priv);
	}

	return 0;

fail:
	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);

	return err;
}

static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
	struct drm_i915_gem_object *obj;
	const struct firmware *fw;
	const u8 *css_header;
	const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
	const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
			- 0x8000; /* 32k reserved (8K stack + 24k context) */
	int err;

	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
	if (err)
		goto fail;
	if (!fw)
		goto fail;

	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
		guc_fw->guc_fw_path, fw);
	DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
		fw->size, minsize, maxsize);

	/* Check the size of the blob before examining buffer contents */
	if (fw->size < minsize || fw->size > maxsize)
		goto fail;

	/*
	 * The GuC firmware image has the version number embedded at a well-known
	 * offset within the firmware blob; note that major / minor version are
	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
	 * in terms of bytes (u8).
	 */
	css_header = fw->data + UOS_CSS_HEADER_OFFSET;
	guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
	guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);

	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
		err = -ENOEXEC;
		goto fail;
	}

	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);

	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	if (IS_ERR_OR_NULL(obj)) {
		err = obj ? PTR_ERR(obj) : -ENOMEM;
		goto fail;
	}

	guc_fw->guc_fw_obj = obj;
	guc_fw->guc_fw_size = fw->size;

	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
			guc_fw->guc_fw_obj);

	release_firmware(fw);
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
	return;

fail:
	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
		err, fw, guc_fw->guc_fw_obj);
	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
		  guc_fw->guc_fw_path, err);

	obj = guc_fw->guc_fw_obj;
	if (obj)
		drm_gem_object_unreference(&obj->base);
	guc_fw->guc_fw_obj = NULL;

	release_firmware(fw);	/* OK even if fw is NULL */
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
}
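
/*
 * The version gate in guc_fw_fetch() requires an exact major match and a
 * minor at least as new as the minimum wanted. A sketch of the acceptance
 * rule for the 4.3 minimum used on Skylake below (found versions are
 * illustrative):
 *
 *	found 4.3 or 4.7  ->  accepted
 *	found 4.2 or 5.0  ->  rejected with -ENOEXEC
 */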

/**
 * intel_guc_ucode_init() - define parameters and fetch firmware
 * @dev: drm device
 *
 * Called early during driver load, but after GEM is initialised.
 * The device struct_mutex must be held by the caller, as we're
 * going to allocate a GEM object to hold the firmware image.
 *
 * The firmware will be transferred to the GuC's memory later,
 * when intel_guc_ucode_load() is called.
 */
void intel_guc_ucode_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path;

	if (!HAS_GUC_SCHED(dev))
		i915.enable_guc_submission = false;

	if (!HAS_GUC_UCODE(dev)) {
		fw_path = NULL;
	} else if (IS_SKYLAKE(dev)) {
		fw_path = I915_SKL_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = 4;
		guc_fw->guc_fw_minor_wanted = 3;
	} else {
		i915.enable_guc_submission = false;
		fw_path = "";	/* unknown device */
	}

	guc_fw->guc_dev = dev;
	guc_fw->guc_fw_path = fw_path;
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;

	if (fw_path == NULL)
		return;

	if (*fw_path == '\0') {
		DRM_ERROR("No GuC firmware known for this platform\n");
		guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
		return;
	}

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
	guc_fw_fetch(dev, guc_fw);
	/* status must now be FAIL or SUCCESS */
}

/**
 * intel_guc_ucode_fini() - clean up all allocated resources
 * @dev: drm device
 */
void intel_guc_ucode_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_fini(dev);

	if (guc_fw->guc_fw_obj)
		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
	guc_fw->guc_fw_obj = NULL;

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}

@@ -848,8 +848,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
	u32 hdmi_val;

	hdmi_val = SDVO_ENCODING_HDMI;
	if (!HAS_PCH_SPLIT(dev))
		hdmi_val |= intel_hdmi->color_range;
	if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
		hdmi_val |= HDMI_COLOR_RANGE_16_235;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1260,11 +1260,12 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,

	if (intel_hdmi->color_range_auto) {
		/* See CEA-861-E - 5.1 Default Encoding Parameters */
		if (pipe_config->has_hdmi_sink &&
		    drm_match_cea_mode(adjusted_mode) > 1)
			intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
		else
			intel_hdmi->color_range = 0;
		pipe_config->limited_color_range =
			pipe_config->has_hdmi_sink &&
			drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_hdmi->limited_color_range;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1273,9 +1274,6 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
		clock_12bpc *= 2;
	}

	if (intel_hdmi->color_range)
		pipe_config->limited_color_range = true;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
		pipe_config->has_pch_encoder = true;

@@ -1470,7 +1468,7 @@ intel_hdmi_set_property(struct drm_connector *connector,

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_hdmi->color_range_auto;
		uint32_t old_range = intel_hdmi->color_range;
		bool old_range = intel_hdmi->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
@@ -1478,18 +1476,18 @@ intel_hdmi_set_property(struct drm_connector *connector,
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_hdmi->color_range_auto = false;
			intel_hdmi->color_range = 0;
			intel_hdmi->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_hdmi->color_range_auto = false;
			intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
			intel_hdmi->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_hdmi->color_range_auto &&
		    old_range == intel_hdmi->color_range)
		    old_range == intel_hdmi->limited_color_range)
			return 0;

		goto done;

@@ -1617,6 +1615,50 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
	mutex_unlock(&dev_priv->sb_lock);
}

static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}

static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1630,8 +1672,21 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)

	intel_hdmi_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, 0x0);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
@@ -1683,6 +1738,39 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
	mutex_unlock(&dev_priv->sb_lock);
}

static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}

static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1701,33 +1789,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)

static void chv_hdmi_post_disable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}

@@ -1758,23 +1826,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx latency optimal setting */
	for (i = 0; i < 4; i++) {
		/* Set the upar bit */
@@ -1817,6 +1868,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
@@ -1851,31 +1905,33 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)

	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= 102 << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Additional steps for 1200mV-0dB */
#if 0
	val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
	if (ch)
		val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
	else
		val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);

	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
		       vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
		       (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
#endif
	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;

@@ -1899,6 +1955,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
	g4x_enable_hdmi(encoder);

	vlv_wait_port_ready(dev_priv, dport, 0x0);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}

static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -1974,7 +2036,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
			intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
		else
			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
		intel_encoder->hpd_pin = HPD_PORT_B;
		/*
		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
		 * interrupts to check the external panel connection.
		 */
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		else
			intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		if (IS_BROXTON(dev_priv))
@@ -2051,6 +2120,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);
	intel_hdmi->attached_connector = intel_connector;

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
@@ -2097,6 +2167,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
		intel_encoder->pre_enable = chv_hdmi_pre_enable;
		intel_encoder->enable = vlv_enable_hdmi;
		intel_encoder->post_disable = chv_hdmi_post_disable;
		intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
		intel_encoder->pre_enable = vlv_hdmi_pre_enable;

@@ -196,13 +196,21 @@
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}

#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
}

enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_CONTEXT,
	LEGACY_32B_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
		LEGACY_64B_CONTEXT :\
		LEGACY_32B_CONTEXT)
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */

@@ -228,6 +236,12 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
{
	WARN_ON(i915.enable_ppgtt == -1);

	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
		return 1;

	if (INTEL_INFO(dev)->gen >= 9)
		return 1;

@@ -255,25 +269,27 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 */
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
			LRC_PPHWSP_PN * PAGE_SIZE;

	/* LRCA is required to be 4K aligned so the more significant 20 bits
	 * are globally unique */
	return lrca >> 12;
}
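
/*
 * Illustrative arithmetic for the ID above (the address is made up): the
 * LRCA is 4K aligned, so dropping the low 12 bits leaves a globally unique
 * value, e.g. lrca = 0x00345000  ->  intel_execlists_ctx_id() = 0x00345.
 */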

static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct intel_engine_cs *ring = rq->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
	uint64_t desc;
	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
			LRC_PPHWSP_PN * PAGE_SIZE;

	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);

	desc = GEN8_CTX_VALID;
	desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
	if (IS_GEN8(ctx_obj->base.dev))
		desc |= GEN8_CTX_L3LLC_COHERENT;
	desc |= GEN8_CTX_PRIVILEGE;

@@ -304,13 +320,13 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
	uint64_t desc[2];

	if (rq1) {
		desc[1] = execlists_ctx_descriptor(rq1);
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = execlists_ctx_descriptor(rq0);
	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */

@@ -342,16 +358,18 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));

	page = i915_gem_object_get_page(ctx_obj, 1);
	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	reg_state = kmap_atomic(page);

	reg_state[CTX_RING_TAIL+1] = rq->tail;
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);

	/* True PPGTT with dynamic page allocation: update PDP registers and
	 * point the unallocated PDPs to the scratch page
	 */
	if (ppgtt) {
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* True 32b PPGTT with dynamic page allocation: update PDP
		 * registers and point the unallocated PDPs to scratch page.
		 * PML4 is allocated during ppgtt init, so this is not needed
		 * in 48-bit mode.
		 */
		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);

@@ -538,8 +556,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)

	i915_gem_request_reference(request);

	request->tail = request->ringbuf->tail;

	spin_lock_irq(&ring->execlist_lock);

	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)

@@ -692,13 +708,19 @@ static void
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *ring = request->ring;
	struct drm_i915_private *dev_priv = request->i915;

	intel_logical_ring_advance(request->ringbuf);

	request->tail = request->ringbuf->tail;

	if (intel_ring_stopped(ring))
		return;

	execlists_context_queue(request);
	if (dev_priv->guc.execbuf_client)
		i915_guc_submit(dev_priv->guc.execbuf_client, request);
	else
		execlists_context_queue(request);
}

static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)

@@ -988,6 +1010,7 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)

static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *ring = rq->ring;
	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
	struct intel_ringbuffer *ringbuf = rq->ringbuf;

@@ -995,8 +1018,8 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)

	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
		ret = i915_gem_obj_ggtt_pin(ctx_obj,
				GEN8_LR_CONTEXT_ALIGN, 0);
		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
				PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
		if (ret)
			goto reset_pin_count;

@@ -1005,6 +1028,10 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
			goto unpin_ctx_obj;

		ctx_obj->dirty = true;

		/* Invalidate GuC TLB. */
		if (i915.enable_guc_submission)
			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
	}

	return ret;

@@ -1111,7 +1138,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);

@@ -1129,7 +1156,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);

@@ -1517,12 +1544,16 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
	 * Ideally, we should set Force PD Restore in ctx descriptor,
	 * but we can't. Force Restore would be a second option, but
	 * it is unsafe in case of lite-restore (because the ctx is
	 * not idle). */
	 * not idle). PML4 is allocated during ppgtt init so this is
	 * not needed in 48-bit. */
	if (req->ctx->ppgtt &&
	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
		ret = intel_logical_ring_emit_pdps(req);
		if (ret)
			return ret;
		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
		    !intel_vgpu_active(req->i915->dev)) {
			ret = intel_logical_ring_emit_pdps(req);
			if (ret)
				return ret;
		}

		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
	}

@@ -1688,6 +1719,34 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{

	/*
	 * On BXT A steppings there is a HW coherency issue whereby the
	 * MI_STORE_DATA_IMM storing the completed request's seqno
	 * occasionally doesn't invalidate the CPU cache. Work around this by
	 * clflushing the corresponding cacheline whenever the caller wants
	 * the coherency to be guaranteed. Note that this cacheline is known
	 * to be clean at this point, since we only write it in
	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
	 * this clflush in practice becomes an invalidate operation.
	 */

	if (!lazy_coherency)
		intel_flush_status_page(ring, I915_GEM_HWS_INDEX);

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);

	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
}

static int gen8_emit_request(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;

@@ -1857,8 +1916,13 @@ static int logical_render_ring_init(struct drm_device *dev)
	ring->init_hw = gen8_init_render_ring;
	ring->init_context = gen8_init_rcs_context;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
		ring->get_seqno = bxt_a_get_seqno;
		ring->set_seqno = bxt_a_set_seqno;
	} else {
		ring->get_seqno = gen8_get_seqno;
		ring->set_seqno = gen8_set_seqno;
	}
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush_render;
	ring->irq_get = gen8_logical_ring_get_irq;

@@ -1904,8 +1968,13 @@ static int logical_bsd_ring_init(struct drm_device *dev)
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	ring->init_hw = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
		ring->get_seqno = bxt_a_get_seqno;
		ring->set_seqno = bxt_a_set_seqno;
	} else {
		ring->get_seqno = gen8_get_seqno;
		ring->set_seqno = gen8_set_seqno;
	}
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;

@@ -1954,8 +2023,13 @@ static int logical_blt_ring_init(struct drm_device *dev)
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	ring->init_hw = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
		ring->get_seqno = bxt_a_get_seqno;
		ring->set_seqno = bxt_a_set_seqno;
	} else {
		ring->get_seqno = gen8_get_seqno;
		ring->set_seqno = gen8_set_seqno;
	}
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;

@@ -1979,8 +2053,13 @@ static int logical_vebox_ring_init(struct drm_device *dev)
		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	ring->init_hw = gen8_init_common_ring;
	ring->get_seqno = gen8_get_seqno;
	ring->set_seqno = gen8_set_seqno;
	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
		ring->get_seqno = bxt_a_get_seqno;
		ring->set_seqno = bxt_a_set_seqno;
	} else {
		ring->get_seqno = gen8_get_seqno;
		ring->set_seqno = gen8_set_seqno;
	}
	ring->emit_request = gen8_emit_request;
	ring->emit_flush = gen8_emit_flush;
	ring->irq_get = gen8_logical_ring_get_irq;

@@ -2126,7 +2205,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM

@@ -2203,13 +2282,24 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);

	/* With dynamic page allocation, PDPs may not be allocated at this point,
	 * Point the unallocated PDPs to the scratch page
	 */
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of space supported.
		 * With dynamic page allocation, PDPs may not be allocated at
		 * this point. Point the unallocated PDPs to the scratch page
		 */
		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
	}

	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;

@@ -2250,8 +2340,7 @@ void intel_lr_context_free(struct intel_context *ctx)
			i915_gem_object_ggtt_unpin(ctx_obj);
		}
		WARN_ON(ctx->engine[ring->id].pin_count);
		intel_destroy_ringbuffer_obj(ringbuf);
		kfree(ringbuf);
		intel_ringbuffer_free(ringbuf);
		drm_gem_object_unreference(&ctx_obj->base);
	}
}

@@ -2285,12 +2374,13 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
					   struct drm_i915_gem_object *default_ctx_obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = ring->dev->dev_private;
|
||||
struct page *page;
|
||||
|
||||
/* The status page is offset 0 from the default context object
|
||||
* in LRC mode. */
|
||||
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
|
||||
ring->status_page.page_addr =
|
||||
kmap(sg_page(default_ctx_obj->pages->sgl));
|
||||
/* The HWSP is part of the default context object in LRC mode. */
|
||||
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
|
||||
+ LRC_PPHWSP_PN * PAGE_SIZE;
|
||||
page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
|
||||
ring->status_page.page_addr = kmap(page);
|
||||
ring->status_page.obj = default_ctx_obj;
|
||||
|
||||
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
|
||||
|
@ -2316,6 +2406,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
|
|||
{
|
||||
const bool is_global_default_ctx = (ctx == ring->default_context);
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *ctx_obj;
|
||||
uint32_t context_size;
|
||||
struct intel_ringbuffer *ringbuf;
|
||||
|
@ -2326,6 +2417,9 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
|
|||
|
||||
context_size = round_up(get_lr_context_size(ring), 4096);
|
||||
|
||||
/* One extra page as the sharing data between driver and GuC */
|
||||
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
|
||||
|
||||
ctx_obj = i915_gem_alloc_object(dev, context_size);
|
||||
if (!ctx_obj) {
|
||||
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
|
||||
|
@ -2333,51 +2427,34 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
|
|||
}
|
||||
|
||||
if (is_global_default_ctx) {
|
||||
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
|
||||
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
|
||||
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
|
||||
if (ret) {
|
||||
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
|
||||
ret);
|
||||
drm_gem_object_unreference(&ctx_obj->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Invalidate GuC TLB. */
|
||||
if (i915.enable_guc_submission)
|
||||
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
|
||||
}
|
||||
|
||||
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
|
||||
if (!ringbuf) {
|
||||
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
|
||||
ring->name);
|
||||
ret = -ENOMEM;
|
||||
ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
|
||||
if (IS_ERR(ringbuf)) {
|
||||
ret = PTR_ERR(ringbuf);
|
||||
goto error_unpin_ctx;
|
||||
}
|
||||
|
||||
ringbuf->ring = ring;
|
||||
|
||||
ringbuf->size = 32 * PAGE_SIZE;
|
||||
ringbuf->effective_size = ringbuf->size;
|
||||
ringbuf->head = 0;
|
||||
ringbuf->tail = 0;
|
||||
ringbuf->last_retired_head = -1;
|
||||
intel_ring_update_space(ringbuf);
|
||||
|
||||
if (ringbuf->obj == NULL) {
|
||||
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
|
||||
if (is_global_default_ctx) {
|
||||
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
|
||||
if (ret) {
|
||||
DRM_DEBUG_DRIVER(
|
||||
"Failed to allocate ringbuffer obj %s: %d\n",
|
||||
ring->name, ret);
|
||||
goto error_free_rbuf;
|
||||
DRM_ERROR(
|
||||
"Failed to pin and map ringbuffer %s: %d\n",
|
||||
ring->name, ret);
|
||||
goto error_ringbuf;
|
||||
}
|
||||
|
||||
if (is_global_default_ctx) {
|
||||
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
|
||||
if (ret) {
|
||||
DRM_ERROR(
|
||||
"Failed to pin and map ringbuffer %s: %d\n",
|
||||
ring->name, ret);
|
||||
goto error_destroy_rbuf;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
|
||||
|
@ -2419,10 +2496,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
|
|||
error:
|
||||
if (is_global_default_ctx)
|
||||
intel_unpin_ringbuffer_obj(ringbuf);
|
||||
error_destroy_rbuf:
|
||||
intel_destroy_ringbuffer_obj(ringbuf);
|
||||
error_free_rbuf:
|
||||
kfree(ringbuf);
|
||||
error_ringbuf:
|
||||
intel_ringbuffer_free(ringbuf);
|
||||
error_unpin_ctx:
|
||||
if (is_global_default_ctx)
|
||||
i915_gem_object_ggtt_unpin(ctx_obj);
|
||||
|
@ -2452,7 +2527,7 @@ void intel_lr_context_reset(struct drm_device *dev,
|
|||
WARN(1, "Failed get_pages for context obj\n");
|
||||
continue;
|
||||
}
|
||||
page = i915_gem_object_get_page(ctx_obj, 1);
|
||||
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
|
||||
reg_state = kmap_atomic(page);
|
||||
|
||||
reg_state[CTX_RING_HEAD+1] = 0;
|
||||
|
|
|
@@ -68,12 +68,20 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
}

/* Logical Ring Contexts */

/* One extra page is added before LRC for GuC as shared data */
#define LRC_GUCSHR_PN	(0)
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)

void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_reset(struct drm_device *dev,
			struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
				     struct intel_engine_cs *ring);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);

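The three page-number macros above pin down the layout of the LRC backing object once the GuC shared page is prepended. A standalone sketch of the resulting offsets (illustration only; the 4 KiB PAGE_SIZE is assumed, the macros are copied from the hunk above):

	#include <stdio.h>

	#define PAGE_SIZE	4096			/* assumed 4 KiB pages */
	#define LRC_GUCSHR_PN	(0)			/* GuC shared data page */
	#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)	/* per-process HWSP */
	#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)	/* register state */

	int main(void)
	{
		/* Matches the offsets used by lrc_setup_hardware_status_page()
		 * and populate_lr_context() in the intel_lrc.c hunks above. */
		printf("GuC shared page: 0x%05x\n", LRC_GUCSHR_PN * PAGE_SIZE); /* 0x00000 */
		printf("PPHWSP:          0x%05x\n", LRC_PPHWSP_PN * PAGE_SIZE); /* 0x01000 */
		printf("context state:   0x%05x\n", LRC_STATE_PN * PAGE_SIZE);  /* 0x02000 */
		return 0;
	}
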
@@ -289,11 +289,14 @@ intel_lvds_mode_valid(struct drm_connector *connector,
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;

	if (mode->hdisplay > fixed_mode->hdisplay)
		return MODE_PANEL;
	if (mode->vdisplay > fixed_mode->vdisplay)
		return MODE_PANEL;
	if (fixed_mode->clock > max_pixclk)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
@@ -952,7 +955,7 @@ void intel_lvds_init(struct drm_device *dev)
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(PCH_PP_CONTROL,
			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
	} else {
	} else if (INTEL_INFO(dev_priv)->gen < 5) {
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
	}
@@ -982,6 +985,18 @@ void intel_lvds_init(struct drm_device *dev)
		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
	}

	/* Set the Panel Power On/Off timings if uninitialized. */
	if (INTEL_INFO(dev_priv)->gen < 5 &&
	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
		/* Set T2 to 40ms and T5 to 200ms */
		I915_WRITE(PP_ON_DELAYS, 0x019007d0);

		/* Set T3 to 35ms and Tx to 200ms */
		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);

		DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
	}

	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
	if (!lvds_encoder)
		return;

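Decoding those magic constants: each delay register holds two 16-bit fields, in what I take to be the usual 100 us units of the panel power sequencing registers on these pre-gen5 parts. A throwaway arithmetic check, not driver code:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pp_on = 0x019007d0, pp_off = 0x015e07d0;

		/* high halves: 0x0190 = 400 ticks, 0x015e = 350 ticks of 0.1 ms */
		printf("T2 = %u ms\n", (pp_on >> 16) / 10);     /* 40  */
		printf("T5 = %u ms\n", (pp_on & 0xffff) / 10);  /* 200 */
		printf("T3 = %u ms\n", (pp_off >> 16) / 10);    /* 35  */
		printf("Tx = %u ms\n", (pp_off & 0xffff) / 10); /* 200 */
		return 0;
	}
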
@@ -484,7 +484,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
	return val;
}

static u32 bdw_get_backlight(struct intel_connector *connector)
static u32 lpt_get_backlight(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -576,7 +576,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
	return val;
}

static void bdw_set_backlight(struct intel_connector *connector, u32 level)
static void lpt_set_backlight(struct intel_connector *connector, u32 level)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -729,6 +729,18 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
	mutex_unlock(&dev_priv->backlight_lock);
}

static void lpt_disable_backlight(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	intel_panel_actually_set_backlight(connector, 0);

	tmp = I915_READ(BLC_PWM_PCH_CTL1);
	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}

static void pch_disable_backlight(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
@@ -829,7 +841,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
	mutex_unlock(&dev_priv->backlight_lock);
}

static void bdw_enable_backlight(struct intel_connector *connector)
static void lpt_enable_backlight(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1212,10 +1224,149 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */

/*
 * Note: The setup hooks can't assume pipe is set!
 * SPT: This value represents the period of the PWM stream in clock periods
 * multiplied by 16 (default increment) or 128 (alternate increment selected in
 * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
 */
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mul, clock;

	if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
		mul = 128;
	else
		mul = 16;

	clock = MHz(24);

	return clock / (pwm_freq_hz * mul);
}

/*
 * LPT: This value represents the period of the PWM stream in clock periods
 * multiplied by 128 (default increment) or 16 (alternate increment, selected in
 * LPT SOUTH_CHICKEN2 register bit 5).
 */
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mul, clock;

	if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
		mul = 16;
	else
		mul = 128;

	if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
		clock = MHz(135); /* LPT:H */
	else
		clock = MHz(24); /* LPT:LP */

	return clock / (pwm_freq_hz * mul);
}

/*
 * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
 * display raw clocks multiplied by 128.
 */
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
	struct drm_device *dev = connector->base.dev;
	int clock = MHz(intel_pch_rawclk(dev));

	return clock / (pwm_freq_hz * 128);
}

/*
 * Gen2: This field determines the number of time base events (display core
 * clock frequency/32) in total for a complete cycle of modulated backlight
 * control.
 *
 * XXX: Query mode clock or hardware clock and program PWM modulation frequency
 * appropriately when it's 0. Use VBT and/or sane defaults.
 * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
 * divided by 32.
 */
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock;

	if (IS_PINEVIEW(dev))
		clock = intel_hrawclk(dev);
	else
		clock = 1000 * dev_priv->display.get_display_clock_speed(dev);

	return clock / (pwm_freq_hz * 32);
}

/*
 * Gen4: This value represents the period of the PWM stream in display core
 * clocks multiplied by 128.
 */
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);

	return clock / (pwm_freq_hz * 128);
}

/*
 * VLV: This value represents the period of the PWM stream in display core
 * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
 * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
 */
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock;

	if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
		if (IS_CHERRYVIEW(dev))
			return KHz(19200) / (pwm_freq_hz * 16);
		else
			return MHz(25) / (pwm_freq_hz * 16);
	} else {
		clock = intel_hrawclk(dev);
		return MHz(clock) / (pwm_freq_hz * 128);
	}
}

static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
	u32 pwm;

	if (!pwm_freq_hz) {
		DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
		return 0;
	}

	if (!dev_priv->display.backlight_hz_to_pwm) {
		DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
		return 0;
	}

	pwm = dev_priv->display.backlight_hz_to_pwm(connector, pwm_freq_hz);
	if (!pwm) {
		DRM_DEBUG_KMS("backlight frequency conversion failed\n");
		return 0;
	}

	DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);

	return pwm;
}

/*
 * Note: The setup hooks can't assume pipe is set!
 */
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
@@ -1243,7 +1394,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
	return scale(min, 0, 255, 0, panel->backlight.max);
}

static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1255,12 +1406,16 @@ static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unus

	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
	panel->backlight.max = pch_ctl2 >> 16;

	if (!panel->backlight.max)
		panel->backlight.max = get_backlight_max_vbt(connector);

	if (!panel->backlight.max)
		return -ENODEV;

	panel->backlight.min = get_backlight_min_vbt(connector);

	val = bdw_get_backlight(connector);
	val = lpt_get_backlight(connector);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

	panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
@@ -1281,6 +1436,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus

	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
	panel->backlight.max = pch_ctl2 >> 16;

	if (!panel->backlight.max)
		panel->backlight.max = get_backlight_max_vbt(connector);

	if (!panel->backlight.max)
		return -ENODEV;

@@ -1312,12 +1471,18 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
	panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;

	panel->backlight.max = ctl >> 17;
	if (panel->backlight.combination_mode)
		panel->backlight.max *= 0xff;

	if (!panel->backlight.max) {
		panel->backlight.max = get_backlight_max_vbt(connector);
		panel->backlight.max >>= 1;
	}

	if (!panel->backlight.max)
		return -ENODEV;

	if (panel->backlight.combination_mode)
		panel->backlight.max *= 0xff;

	panel->backlight.min = get_backlight_min_vbt(connector);

	val = i9xx_get_backlight(connector);
@@ -1341,12 +1506,16 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu

	ctl = I915_READ(BLC_PWM_CTL);
	panel->backlight.max = ctl >> 16;
	if (panel->backlight.combination_mode)
		panel->backlight.max *= 0xff;

	if (!panel->backlight.max)
		panel->backlight.max = get_backlight_max_vbt(connector);

	if (!panel->backlight.max)
		return -ENODEV;

	if (panel->backlight.combination_mode)
		panel->backlight.max *= 0xff;

	panel->backlight.min = get_backlight_min_vbt(connector);

	val = i9xx_get_backlight(connector);
@@ -1363,21 +1532,8 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	enum pipe p;
	u32 ctl, ctl2, val;

	for_each_pipe(dev_priv, p) {
		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));

		/* Skip if the modulation freq is already set */
		if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
			continue;

		cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
		I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
			   cur_val);
	}

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return -ENODEV;

@@ -1386,6 +1542,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe

	ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
	panel->backlight.max = ctl >> 16;

	if (!panel->backlight.max)
		panel->backlight.max = get_backlight_max_vbt(connector);

	if (!panel->backlight.max)
		return -ENODEV;

@@ -1412,6 +1572,10 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
	panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;

	panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);

	if (!panel->backlight.max)
		panel->backlight.max = get_backlight_max_vbt(connector);

	if (!panel->backlight.max)
		return -ENODEV;

@@ -1519,18 +1683,23 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
		dev_priv->display.disable_backlight = bxt_disable_backlight;
		dev_priv->display.set_backlight = bxt_set_backlight;
		dev_priv->display.get_backlight = bxt_get_backlight;
	} else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
		dev_priv->display.setup_backlight = bdw_setup_backlight;
		dev_priv->display.enable_backlight = bdw_enable_backlight;
		dev_priv->display.disable_backlight = pch_disable_backlight;
		dev_priv->display.set_backlight = bdw_set_backlight;
		dev_priv->display.get_backlight = bdw_get_backlight;
	} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
		dev_priv->display.setup_backlight = lpt_setup_backlight;
		dev_priv->display.enable_backlight = lpt_enable_backlight;
		dev_priv->display.disable_backlight = lpt_disable_backlight;
		dev_priv->display.set_backlight = lpt_set_backlight;
		dev_priv->display.get_backlight = lpt_get_backlight;
		if (HAS_PCH_LPT(dev))
			dev_priv->display.backlight_hz_to_pwm = lpt_hz_to_pwm;
		else
			dev_priv->display.backlight_hz_to_pwm = spt_hz_to_pwm;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.setup_backlight = pch_setup_backlight;
		dev_priv->display.enable_backlight = pch_enable_backlight;
		dev_priv->display.disable_backlight = pch_disable_backlight;
		dev_priv->display.set_backlight = pch_set_backlight;
		dev_priv->display.get_backlight = pch_get_backlight;
		dev_priv->display.backlight_hz_to_pwm = pch_hz_to_pwm;
	} else if (IS_VALLEYVIEW(dev)) {
		if (dev_priv->vbt.has_mipi) {
			dev_priv->display.setup_backlight = pwm_setup_backlight;
@@ -1544,6 +1713,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
			dev_priv->display.disable_backlight = vlv_disable_backlight;
			dev_priv->display.set_backlight = vlv_set_backlight;
			dev_priv->display.get_backlight = vlv_get_backlight;
			dev_priv->display.backlight_hz_to_pwm = vlv_hz_to_pwm;
		}
	} else if (IS_GEN4(dev)) {
		dev_priv->display.setup_backlight = i965_setup_backlight;
@@ -1551,12 +1721,14 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
		dev_priv->display.disable_backlight = i965_disable_backlight;
		dev_priv->display.set_backlight = i9xx_set_backlight;
		dev_priv->display.get_backlight = i9xx_get_backlight;
		dev_priv->display.backlight_hz_to_pwm = i965_hz_to_pwm;
	} else {
		dev_priv->display.setup_backlight = i9xx_setup_backlight;
		dev_priv->display.enable_backlight = i9xx_enable_backlight;
		dev_priv->display.disable_backlight = i9xx_disable_backlight;
		dev_priv->display.set_backlight = i9xx_set_backlight;
		dev_priv->display.get_backlight = i9xx_get_backlight;
		dev_priv->display.backlight_hz_to_pwm = i9xx_hz_to_pwm;
	}
}

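A sanity check on the conversion the new hz_to_pwm hooks perform, using spt_hz_to_pwm() as the example. Assuming MHz(24) expands to 24,000,000 and a hypothetical 200 Hz VBT frequency with the default 16-clock increment, the programmed PWM period works out as:

	#include <stdio.h>

	int main(void)
	{
		unsigned int clock = 24000000;  /* MHz(24), SPT PWM clock */
		unsigned int pwm_freq_hz = 200; /* hypothetical VBT value */
		unsigned int mul = 16;          /* default increment */

		/* same arithmetic as spt_hz_to_pwm() above */
		printf("PWM period = %u\n", clock / (pwm_freq_hz * mul)); /* 7500 */
		return 0;
	}
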
@@ -116,18 +116,24 @@ static void bxt_init_clock_gating(struct drm_device *dev)

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/* FIXME: apply on A0 only */
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
	if (INTEL_REVID(dev) == BXT_REVID_A0) {
		/*
		 * Hardware specification requires this bit to be
		 * set to 1 for A0
		 */
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
	}
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -3166,7 +3172,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
	if (fb) {
		p->plane[0].enabled = true;
		p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
			drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
			drm_format_plane_cpp(fb->pixel_format, 1) :
			drm_format_plane_cpp(fb->pixel_format, 0);
		p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
			drm_format_plane_cpp(fb->pixel_format, 0) : 0;
		p->plane[0].tiling = fb->modifier[0];
@@ -5565,7 +5572,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5655,7 +5662,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -6604,7 +6611,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -6619,7 +6626,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;

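The yesno() logging helper these hunks switch to is defined elsewhere in i915 and is not part of this diff; assuming it follows the obvious shape, it is equivalent to:

	#include <stdbool.h>

	/* Assumed definition; the real helper lives outside this diff. */
	static inline const char *yesno(bool v)
	{
		return v ? "yes" : "no";
	}
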
@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
	return 0;
}

void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
}

int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf)
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
				      struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *obj;

@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
	return 0;
}

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
{
	struct intel_ringbuffer *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (ring == NULL)
		return ERR_PTR(-ENOMEM);

	ring->ring = engine;

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->dev) || IS_845G(engine->dev))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
			  engine->name, ret);
		kfree(ring);
		return ERR_PTR(ret);
	}

	return ring;
}

void
intel_ringbuffer_free(struct intel_ringbuffer *ring)
{
	intel_destroy_ringbuffer_obj(ring);
	kfree(ring);
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,

	WARN_ON(ring->buffer);

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf)
		return -ENOMEM;
	ring->buffer = ringbuf;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->execlist_queue);
	i915_gem_batch_pool_init(dev, &ring->batch_pool);
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->ring = ring;
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);
	ring->buffer = ringbuf;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
			goto error;
	}

	WARN_ON(ringbuf->obj);

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
			  ring->name, ret);
		goto error;
	}

	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
		goto error;
	}

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(dev) || IS_845G(dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		goto error;
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
	return 0;

error:
	kfree(ringbuf);
	intel_ringbuffer_free(ringbuf);
	ring->buffer = NULL;
	return ret;
}
@@ -2098,19 +2121,18 @@ error:
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv;
	struct intel_ringbuffer *ringbuf;

	if (!intel_ring_initialized(ring))
		return;

	dev_priv = to_i915(ring->dev);
	ringbuf = ring->buffer;

	intel_stop_ring_buffer(ring);
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

	intel_unpin_ringbuffer_obj(ringbuf);
	intel_destroy_ringbuffer_obj(ringbuf);
	intel_unpin_ringbuffer_obj(ring->buffer);
	intel_ringbuffer_free(ring->buffer);
	ring->buffer = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)

	i915_cmd_parser_fini_ring(ring);
	i915_gem_batch_pool_fini(&ring->batch_pool);

	kfree(ringbuf);
	ring->buffer = NULL;
}

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)

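With this refactor, the legacy and execlists paths share one allocator and both call sites follow the same create/free pairing. A sketch distilled from the two call sites above (kernel context assumed, error labels elided):

	struct intel_ringbuffer *ringbuf;

	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);

	/* ... pin, map and use the buffer; on teardown or error: */
	intel_ringbuffer_free(ringbuf);
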
@@ -377,6 +377,13 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
	return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
			       sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
@@ -413,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

@@ -855,6 +855,25 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
@@ -906,13 +925,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
		   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);
@@ -947,30 +960,126 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 tmp;

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
			   DPLL_REF_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

@@ -978,8 +1087,38 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1003,6 +1142,124 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -1166,8 +1423,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
	intel_runtime_pm_put(dev_priv);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
@@ -1429,21 +1684,6 @@ static struct i915_power_well chv_power_wells[] = {
	},
};

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
@@ -1629,19 +1869,72 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state to reconstruct the expected initial
	 * value.
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
	if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -1687,7 +1980,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
	power_domains->initializing = true;

	if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);

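BITS_SET(), defined and #undef'd around assert_chv_phy_status() above, is true only when every bit of the mask is set, not merely some of them. A standalone illustration, not driver code:

	#include <stdio.h>

	#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

	int main(void)
	{
		printf("%d\n", BITS_SET(0xf, 0x3)); /* 1: both requested bits set */
		printf("%d\n", BITS_SET(0x9, 0x3)); /* 0: bit 1 is clear in 0x9 */
		return 0;
	}
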
@@ -53,7 +53,7 @@
#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))


static const char *tv_format_names[] = {
static const char * const tv_format_names[] = {
	"NTSC_M" , "NTSC_J" , "NTSC_443",
	"PAL_B" , "PAL_D" , "PAL_G" ,
	"PAL_H" , "PAL_I" , "PAL_M" ,
@@ -63,7 +63,7 @@ static const char *tv_format_names[] = {
	"SECAM_60"
};

#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
#define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names)

struct intel_sdvo {
	struct intel_encoder base;
@@ -452,7 +452,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
	DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}

static const char *cmd_status_names[] = {
static const char * const cmd_status_names[] = {
	"Power on",
	"Success",
	"Not supported",

@ -76,7 +76,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
|
|||
* avoid random delays. The value written to @start_vbl_count should be
|
||||
* supplied to intel_pipe_update_end() for error checking.
|
||||
*/
|
||||
void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
|
||||
void intel_pipe_update_start(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
|
||||
|
@ -95,7 +95,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
|
|||
max = vblank_start - 1;
|
||||
|
||||
local_irq_disable();
|
||||
*start_vbl_count = 0;
|
||||
crtc->start_vbl_count = 0;
|
||||
|
||||
if (min <= 0 || max <= 0)
|
||||
return;
|
||||
|
@ -134,9 +134,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
|
|||
|
||||
drm_crtc_vblank_put(&crtc->base);
|
||||
|
||||
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
|
||||
crtc->start_vbl_time = ktime_get();
|
||||
crtc->start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
|
||||
|
||||
trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
|
||||
trace_i915_pipe_update_vblank_evaded(crtc, min, max,
|
||||
crtc->start_vbl_count);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -148,19 +150,21 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
  * re-enables interrupts and verifies the update was actually completed
  * before a vblank using the value of @start_vbl_count.
  */
-void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+void intel_pipe_update_end(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	enum pipe pipe = crtc->pipe;
 	u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+	ktime_t end_vbl_time = ktime_get();
 
 	trace_i915_pipe_update_end(crtc, end_vbl_count);
 
 	local_irq_enable();
 
-	if (start_vbl_count && start_vbl_count != end_vbl_count)
-		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
-			  pipe_name(pipe), start_vbl_count, end_vbl_count);
+	if (crtc->start_vbl_count && crtc->start_vbl_count != end_vbl_count)
+		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us\n",
+			  pipe_name(pipe), crtc->start_vbl_count, end_vbl_count,
+			  ktime_us_delta(end_vbl_time, crtc->start_vbl_time));
 }
 
 static void
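
The intel_pipe_update_start()/end() changes above move the vblank-evasion bookkeeping from a caller-owned counter into struct intel_crtc and stamp the start time, so the failure path can now report how long the critical section ran. The overall pattern, sketched with a hypothetical read_vblank_counter() standing in for dev->driver->get_vblank_counter():

#include <linux/irqflags.h>
#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/types.h>

extern u32 read_vblank_counter(void);	/* hypothetical stand-in */

struct pipe_update {
	u32 start_vbl_count;
	ktime_t start_vbl_time;
};

static void pipe_update_start(struct pipe_update *upd)
{
	local_irq_disable();		/* nothing may preempt the window */
	upd->start_vbl_time = ktime_get();
	upd->start_vbl_count = read_vblank_counter();
}

static void pipe_update_end(struct pipe_update *upd)
{
	u32 end_vbl_count = read_vblank_counter();
	ktime_t end_vbl_time = ktime_get();

	local_irq_enable();
	if (upd->start_vbl_count != end_vbl_count)
		pr_err("update crossed a vblank after %lld us\n",
		       ktime_us_delta(end_vbl_time, upd->start_vbl_time));
}
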
@@ -923,8 +927,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
 
 	crtc = crtc ? crtc : plane->crtc;
 
-	plane->fb = fb;
-
 	if (!crtc->state->active)
 		return;
 
@@ -1291,7 +1291,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
 		return;
 
 
-	for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
 		tv_mode = tv_modes + i;
 
 		if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -52,8 +52,7 @@ static const char * const forcewake_domain_names[] = {
 const char *
 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
 {
-	BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
-		     FW_DOMAIN_ID_COUNT);
+	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
 
 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
 		return forcewake_domain_names[id];
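
BUILD_BUG_ON() combined with ARRAY_SIZE() is the usual way to keep a name table in lockstep with its enum: adding a forcewake domain without a matching string now breaks the build instead of returning garbage at runtime. A hedged, self-contained sketch of the pattern (the enum below is illustrative, not the driver's actual list):

#include <linux/bug.h>
#include <linux/kernel.h>

enum fw_domain_id {
	FW_DOMAIN_ID_RENDER,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_COUNT	/* must stay last */
};

static const char * const fw_domain_names[] = {
	"render",
	"blitter",
	"media",
};

static const char *fw_domain_to_str(enum fw_domain_id id)
{
	/* Compile-time failure if the table and the enum drift apart. */
	BUILD_BUG_ON(ARRAY_SIZE(fw_domain_names) != FW_DOMAIN_ID_COUNT);

	return id < FW_DOMAIN_ID_COUNT ? fw_domain_names[id] : NULL;
}
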
@@ -770,6 +769,7 @@ static u##x \
 gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
 	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
 		fw_engine = 0; \
 	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
@@ -783,6 +783,7 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 	if (fw_engine) \
 		__force_wake_get(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
 	GEN6_READ_FOOTER; \
 }
 
@@ -983,6 +984,7 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
 		bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
 	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
 	    is_gen9_shadowed(dev_priv, reg)) \
 		fw_engine = 0; \
@@ -997,6 +999,8 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
 	if (fw_engine) \
 		__force_wake_get(dev_priv, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
 	GEN6_WRITE_FOOTER; \
 }
 
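
The gen9 read/write macros now bracket each raw MMIO access with hsw_unclaimed_reg_debug(), matching the existing HSW/BDW paths: a check before the access catches someone else's stray unclaimed write, and a check after attributes a fault to this access. Schematically, with check_and_clear_unclaimed() and raw_read32() as hypothetical stand-ins for the status-bit poll and the raw accessor:

#include <linux/printk.h>
#include <linux/types.h>

extern bool check_and_clear_unclaimed(void);	/* hypothetical */
extern u32 raw_read32(u32 reg);			/* hypothetical */

static u32 debug_read32(u32 reg)
{
	u32 val;

	if (check_and_clear_unclaimed())
		pr_err("unclaimed access before read of %#x\n", reg);

	val = raw_read32(reg);

	if (check_and_clear_unclaimed())
		pr_err("read of %#x was itself unclaimed\n", reg);

	return val;
}
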
@@ -1198,8 +1202,6 @@ void intel_uncore_init(struct drm_device *dev)
 
 	switch (INTEL_INFO(dev)->gen) {
 	default:
-		MISSING_CASE(INTEL_INFO(dev)->gen);
-		return;
 	case 9:
 		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
 		ASSIGN_READ_MMIO_VFUNCS(gen9);
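
Dropping MISSING_CASE() and the early return changes the policy for unrecognized generations: instead of leaving the MMIO vfuncs unset, an unknown (future) gen now falls through to the newest known setup. The idiom, with hypothetical assign helpers:

extern void assign_gen9_vfuncs(void);	/* hypothetical */
extern void assign_gen8_vfuncs(void);	/* hypothetical */

static void init_mmio_funcs(int gen)
{
	switch (gen) {
	default:	/* future gens: deliberately fall through to gen9 */
	case 9:
		assign_gen9_vfuncs();
		break;
	case 8:
		assign_gen8_vfuncs();
		break;
	}
}
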
@@ -634,6 +634,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 	       (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
 }
 
+static inline bool
+drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_DPCD_REV] >= 0x12 &&
+		dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
+}
+
 /*
  * DisplayPort AUX channel
  */
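
The new drm_dp_tps3_supported() helper folds the two DPCD conditions (revision >= 1.2 plus the TPS3 capability bit in MAX_LANE_COUNT) into a single predicate. A plausible, illustrative call site in a link-training path (not a quote of the i915 code):

#include <drm/drm_dp_helper.h>

/* Illustrative only: prefer training pattern 3 when the sink
 * advertises it, otherwise fall back to pattern 2. */
static u8 pick_training_pattern(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	if (drm_dp_tps3_supported(dpcd))
		return DP_TRAINING_PATTERN_3;

	return DP_TRAINING_PATTERN_2;
}
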