Merge tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel into drm-next

- plane handling refactoring from Matt Roper and Gustavo Padovan in prep for
  atomic updates
- fixes and more patches for the seqno to request transformation from John
- docbook for fbc from Rodrigo
- prep work for dual-link dsi from Gaurav Singh
- crc fixes from Ville
- special ggtt views infrastructure from Tvrtko Ursulin
- shadow batch copying for the cmd parser from Brad Volkin
- execlist and full ppgtt by default on gen8, for testing for now

* tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel: (131 commits)
  drm/i915: Update DRIVER_DATE to 20141219
  drm/i915: Hold runtime PM during plane commit
  drm/i915: Organize bind_vma funcs
  drm/i915: Organize INSTDONE report for future.
  drm/i915: Organize PDP regs report for future.
  drm/i915: Organize PPGTT init
  drm/i915: Organize Fence registers for future enablement.
  drm/i915: tame the chattermouth (v2)
  drm/i915: Warn about missing context state workarounds only once
  drm/i915: Use true PPGTT in Gen8+ when execlists are enabled
  drm/i915: Skip gunit save/restore for cherryview
  drm/i915/chv: Use timeout mode for RC6 on chv
  drm/i915: Add GPGPU_THREADS_DISPATCHED to the register whitelist
  drm/i915: Tidy up execbuffer command parsing code
  drm/i915: Mark shadow batch buffers as purgeable
  drm/i915: Use batch length instead of object size in command parser
  drm/i915: Use batch pools with the command parser
  drm/i915: Implement a framework for batch buffer pools
  drm/i915: fix use after free during eDP encoder destroying
  drm/i915/skl: Skylake also supports DP MST
  ...
Merged by Dave Airlie on 2015-01-10 08:46:24 +10:00 as commit adc31849b2.
52 changed files with 3896 additions and 2822 deletions.

Documentation/DocBook/drm.tmpl

@@ -4035,6 +4035,11 @@ int num_ioctls;</synopsis>
<title>Panel Self Refresh PSR (PSR/SRD)</title>
!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
!Idrivers/gpu/drm/i915/intel_psr.c
</sect2>
<sect2>
<title>Frame Buffer Compression (FBC)</title>
!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
!Idrivers/gpu/drm/i915/intel_fbc.c
</sect2>
<sect2>
<title>DPIO</title>
@@ -4138,12 +4143,22 @@ int num_ioctls;</synopsis>
<title>Batchbuffer Parsing</title>
!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
<title>Batchbuffer Pools</title>
!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
</sect2>
<sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
<sect2>
<title>Global GTT views</title>
!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
!Idrivers/gpu/drm/i915/i915_gem_gtt.c
</sect2>
</sect1>
<sect1>

drivers/gpu/drm/drm_crtc.c

@@ -2658,6 +2658,27 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
/**
* drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
* @mode: mode to query
* @hdisplay: hdisplay value to fill in
* @vdisplay: vdisplay value to fill in
*
* The vdisplay value will be doubled if the specified mode is a stereo mode of
* the appropriate layout.
*/
void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay)
{
struct drm_display_mode adjusted;
drm_mode_copy(&adjusted, mode);
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
*hdisplay = adjusted.crtc_hdisplay;
*vdisplay = adjusted.crtc_vdisplay;
}
EXPORT_SYMBOL(drm_crtc_get_hv_timing);
/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
@@ -2675,16 +2696,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
{
int hdisplay, vdisplay;
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
if (drm_mode_is_stereo(mode)) {
struct drm_display_mode adjusted = *mode;
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
hdisplay = adjusted.crtc_hdisplay;
vdisplay = adjusted.crtc_vdisplay;
}
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);

drivers/gpu/drm/drm_modes.c

@@ -739,6 +739,8 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
* - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
* be performed for doublescan and vscan > 1 modes respectively.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
@@ -765,19 +767,23 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
}
}
if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
}
}
if (!(adjust_flags & CRTC_NO_VSCAN)) {
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
}
}
if (adjust_flags & CRTC_STEREO_DOUBLE) {
unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
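
To make the adjust_flags combinations documented above concrete, here is a minimal sketch of a hypothetical caller (not part of the patch). It assumes CRTC_STEREO_DOUBLE_ONLY, the flag set used by drm_crtc_get_hv_timing() in drm_crtc.c above, combines CRTC_STEREO_DOUBLE with the two CRTC_NO_* flags:

/* Hypothetical helper: compute the scanout size for a mode, doubling
 * frame-packed stereo modes while leaving doublescan/vscan untouched. */
static void example_get_scanout_size(const struct drm_display_mode *mode,
				     int *w, int *h)
{
	struct drm_display_mode adjusted;

	drm_mode_copy(&adjusted, mode);
	drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
	*w = adjusted.crtc_hdisplay;
	*h = adjusted.crtc_vdisplay;
}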

drivers/gpu/drm/i915/Makefile

@@ -19,6 +19,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
@@ -47,6 +48,7 @@ i915-y += intel_renderstate_gen6.o \
i915-y += intel_audio.o \
intel_bios.o \
intel_display.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
intel_modes.o \

drivers/gpu/drm/i915/i915_cmd_parser.c

@@ -152,6 +152,7 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -210,6 +211,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
@@ -229,6 +231,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -272,6 +275,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
@@ -401,6 +405,7 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
#define REG64(addr) (addr), (addr + sizeof(u32))
static const u32 gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
@@ -481,13 +486,17 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
if (subclient == INSTR_MEDIA_SUBCLIENT)
return 0xFFF;
if (subclient == INSTR_MEDIA_SUBCLIENT) {
if (op == 6)
return 0xFFFF;
else
return 0xFFF;
} else
return 0xFF;
}
@@ -716,14 +725,14 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
if (hash_empty(ring->cmd_hash)) {
WARN_ON(!hash_empty(ring->cmd_hash));
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
fini_hash_table(ring);
return ret;
}
}
ring->needs_cmd_parser = true;
@@ -840,6 +849,69 @@ finish:
return (u32*)addr;
}
/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
struct drm_i915_gem_object *src_obj,
u32 batch_start_offset,
u32 batch_len)
{
int ret = 0;
int needs_clflush = 0;
u32 *src_base, *dest_base = NULL;
u32 *src_addr, *dest_addr;
u32 offset = batch_start_offset / sizeof(*dest_addr);
u32 end = batch_start_offset + batch_len;
if (end > dest_obj->base.size || end > src_obj->base.size)
return ERR_PTR(-E2BIG);
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
return ERR_PTR(ret);
}
src_base = vmap_batch(src_obj);
if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
goto unpin_src;
}
src_addr = src_base + offset;
if (needs_clflush)
drm_clflush_virt_range((char *)src_addr, batch_len);
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
goto unmap_src;
}
dest_base = vmap_batch(dest_obj);
if (!dest_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
ret = -ENOMEM;
goto unmap_src;
}
dest_addr = dest_base + offset;
if (batch_start_offset != 0)
memset((u8 *)dest_base, 0, batch_start_offset);
memcpy(dest_addr, src_addr, batch_len);
memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
unmap_src:
vunmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
return ret ? ERR_PTR(ret) : dest_base;
}
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
@@ -956,7 +1028,9 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
@@ -967,33 +1041,38 @@ static bool check_cmd(const struct intel_engine_cs *ring,
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
return ret;
DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
return -1;
}
batch_base = vmap_batch(batch_obj);
if (!batch_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
i915_gem_object_unpin_pages(batch_obj);
return -ENOMEM;
batch_base = copy_batch(shadow_batch_obj, batch_obj,
batch_start_offset, batch_len);
if (IS_ERR(batch_base)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
i915_gem_object_ggtt_unpin(shadow_batch_obj);
return PTR_ERR(batch_base);
}
if (needs_clflush)
drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
@@ -1053,8 +1132,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
i915_gem_object_unpin_pages(batch_obj);
i915_gem_object_ggtt_unpin(shadow_batch_obj);
return ret;
}
@@ -1076,6 +1154,7 @@ int i915_cmd_parser_get_version(void)
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
*/
return 2;
return 3;
}
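
Taken together, the new parsing flow is: fetch a kernel-owned shadow buffer from the batch pool, then let i915_parse_cmds() pin it, copy in the user batch, and scan the copy. A hedged sketch using only the functions added in this series (the helper name and trimmed error handling are illustrative); compare i915_gem_execbuffer_parse() in i915_gem_execbuffer.c below:

/* Sketch of the shadow-batch flow; error unwinding trimmed for brevity. */
static int example_parse_into_shadow(struct intel_engine_cs *ring,
				     struct drm_i915_gem_object *batch_obj,
				     u32 start, u32 len, bool is_master)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct drm_i915_gem_object *shadow;

	/* Reuse or allocate a kernel-owned buffer at least as large as
	 * the user batch. */
	shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
					 batch_obj->base.size);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* Pins the shadow, copies [start, start + len) out of the user
	 * batch, zero-fills (MI_NOOP) the remainder and scans the copy.
	 * -EACCES means "unhandled command": the caller falls back to
	 * executing the original batch without DISPATCH_SECURE. */
	return i915_parse_cmds(ring, batch_obj, shadow, start, len,
			       is_master);
}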

drivers/gpu/drm/i915/i915_debugfs.c

@@ -96,9 +96,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
if (obj->user_pin_count > 0)
return "P";
else if (i915_gem_obj_is_pinned(obj))
if (i915_gem_obj_is_pinned(obj))
return "p";
else
return " ";
@@ -133,9 +131,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
obj->last_read_seqno,
obj->last_write_seqno,
obj->last_fenced_seqno,
i915_gem_request_get_seqno(obj->last_read_req),
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -154,8 +152,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_puts(m, " (pp");
else
seq_puts(m, " (g");
seq_printf(m, "gtt offset: %08lx, size: %08lx)",
vma->node.start, vma->node.size);
seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
vma->node.start, vma->node.size,
vma->ggtt_view.type);
}
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
@@ -168,8 +167,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
if (obj->last_read_req != NULL)
seq_printf(m, " (%s)",
i915_gem_request_get_ring(obj->last_read_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -336,7 +336,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (ppgtt->file_priv != stats->file_priv)
continue;
if (obj->ring) /* XXX per-vma statistic */
if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -346,7 +346,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
if (obj->ring)
if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
@@ -360,6 +360,33 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
#define print_file_stats(m, name, stats) \
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
stats.global, \
stats.shared, \
stats.unbound)
static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
struct file_stats stats;
memset(&stats, 0, sizeof(stats));
list_for_each_entry(obj,
&dev_priv->mm.batch_pool.cache_list,
batch_pool_list)
per_file_stats(0, obj, &stats);
print_file_stats(m, "batch pool", stats);
}
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -441,6 +468,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->gtt.base.total,
dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
@@ -459,15 +489,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
*/
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
task ? task->comm : "<unknown>",
stats.count,
stats.total,
stats.active,
stats.inactive,
stats.global,
stats.shared,
stats.unbound);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
@@ -543,14 +565,16 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->flip_queued_ring) {
if (work->flip_queued_req) {
struct intel_engine_cs *ring =
i915_gem_request_get_ring(work->flip_queued_req);
seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
work->flip_queued_ring->name,
work->flip_queued_seqno,
ring->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
work->flip_queued_seqno));
ring->get_seqno(ring, true),
i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -582,6 +606,36 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
return 0;
}
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int count = 0;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
seq_puts(m, "cache:\n");
list_for_each_entry(obj,
&dev_priv->mm.batch_pool.cache_list,
batch_pool_list) {
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
count++;
}
seq_printf(m, "total: %d\n", count);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -2155,6 +2209,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 psrperf = 0;
u32 stat[3];
enum pipe pipe;
bool enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -2169,14 +2225,36 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
enabled = HAS_PSR(dev) &&
I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (HAS_PSR(dev)) {
if (HAS_DDI(dev))
enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
}
}
}
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
if (HAS_PSR(dev))
if (!HAS_DDI(dev))
for_each_pipe(dev_priv, pipe) {
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
seq_printf(m, " pipe %c", pipe_name(pipe));
}
seq_puts(m, "\n");
/* CHV PSR has no kind of performance counter */
if (HAS_PSR(dev) && HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -2322,7 +2400,7 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_INIT:
return "INIT";
default:
WARN_ON(1);
MISSING_CASE(domain);
return "?";
}
}
@@ -2718,6 +2796,9 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
enum pipe pipe;
int plane;
if (INTEL_INFO(dev)->gen < 9)
return 0;
drm_modeset_lock_all(dev);
ddb = &dev_priv->wm.skl_hw.ddb;
@@ -2830,7 +2911,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
int head, tail, n_entries, n;
int n_entries;
ssize_t bytes_read;
/*
@@ -2862,36 +2943,39 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
}
/* We now have one or more entries to read */
head = pipe_crc->head;
tail = pipe_crc->tail;
n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
count / PIPE_CRC_LINE_LEN);
spin_unlock_irq(&pipe_crc->lock);
n_entries = count / PIPE_CRC_LINE_LEN;
bytes_read = 0;
n = 0;
do {
struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
while (n_entries > 0) {
struct intel_pipe_crc_entry *entry =
&pipe_crc->entries[pipe_crc->tail];
int ret;
if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
INTEL_PIPE_CRC_ENTRIES_NR) < 1)
break;
BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
"%8u %8x %8x %8x %8x %8x\n",
entry->frame, entry->crc[0],
entry->crc[1], entry->crc[2],
entry->crc[3], entry->crc[4]);
ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
buf, PIPE_CRC_LINE_LEN);
spin_unlock_irq(&pipe_crc->lock);
ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
if (ret == PIPE_CRC_LINE_LEN)
return -EFAULT;
BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
n++;
} while (--n_entries);
user_buf += PIPE_CRC_LINE_LEN;
n_entries--;
spin_lock_irq(&pipe_crc->lock);
pipe_crc->tail = tail;
}
spin_unlock_irq(&pipe_crc->lock);
return bytes_read;
@@ -3072,6 +3156,12 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
if (!IS_CHERRYVIEW(dev))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
@@ -3092,11 +3182,19 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
if (pipe == PIPE_A)
switch (pipe) {
case PIPE_A:
tmp |= PIPE_A_SCRAMBLE_RESET;
else
break;
case PIPE_B:
tmp |= PIPE_B_SCRAMBLE_RESET;
break;
case PIPE_C:
tmp |= PIPE_C_SCRAMBLE_RESET;
break;
default:
return -EINVAL;
}
I915_WRITE(PORT_DFT2_G4X, tmp);
}
@@ -3185,10 +3283,19 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
switch (pipe) {
case PIPE_A:
tmp &= ~PIPE_A_SCRAMBLE_RESET;
else
break;
case PIPE_B:
tmp &= ~PIPE_B_SCRAMBLE_RESET;
break;
case PIPE_C:
tmp &= ~PIPE_C_SCRAMBLE_RESET;
break;
default:
return;
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
@@ -3359,13 +3466,15 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* none -> real source transition */
if (source) {
struct intel_pipe_crc_entry *entries;
DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
pipe_name(pipe), pipe_crc_source_name(source));
pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
INTEL_PIPE_CRC_ENTRIES_NR,
entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
sizeof(pipe_crc->entries[0]),
GFP_KERNEL);
if (!pipe_crc->entries)
if (!entries)
return -ENOMEM;
/*
@@ -3377,6 +3486,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
kfree(pipe_crc->entries);
pipe_crc->entries = entries;
pipe_crc->head = 0;
pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
@@ -3404,6 +3515,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
pipe_crc->entries = NULL;
pipe_crc->head = 0;
pipe_crc->tail = 0;
spin_unlock_irq(&pipe_crc->lock);
kfree(entries);
@@ -4296,6 +4409,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},

drivers/gpu/drm/i915/i915_dma.c

@@ -928,6 +928,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
@@ -1004,6 +1005,13 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
return -ENODEV;
}
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1025,8 +1033,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),

drivers/gpu/drm/i915/i915_drv.c

@@ -841,6 +841,8 @@ int i915_reset(struct drm_device *dev)
return ret;
}
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
/*
@@ -1299,6 +1301,8 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
if (!IS_CHERRYVIEW(dev_priv->dev))
vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
@@ -1330,6 +1334,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
if (!IS_CHERRYVIEW(dev_priv->dev))
vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);

drivers/gpu/drm/i915/i915_drv.h

@@ -55,10 +55,51 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20141121"
#define DRIVER_DATE "20141219"
#undef WARN_ON
#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
/* Many gcc seem to no see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
bool __i915_warn_cond = (x); \
if (__builtin_constant_p(__i915_warn_cond)) \
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
* which may not necessarily be a user visible problem. This will either
* WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
* enable distros and users to tailor their preferred amount of i915 abrt
* spam.
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
__WARN_printf(format); \
else \
DRM_ERROR(format); \
} \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
__WARN_printf("WARN_ON(" #condition ")\n"); \
else \
DRM_ERROR("WARN_ON(" #condition ")\n"); \
} \
unlikely(__ret_warn_on); \
})
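
A sketch of a typical call site, modelled on the hw state checker in intel_display.c (the helper itself is illustrative and not part of this hunk):

/* Sketch: cross-check a plane's hw state with the new macro. When
 * i915.verbose_state_checks is set, a mismatch WARNs with a backtrace;
 * otherwise it is logged through DRM_ERROR. */
static void example_assert_plane(struct drm_i915_private *dev_priv,
				 enum plane plane, bool expected)
{
	bool cur_state = !!(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);

	I915_STATE_WARN(cur_state != expected,
			"plane %c state mismatch (expected %s, found %s)\n",
			plane_name(plane), expected ? "on" : "off",
			cur_state ? "on" : "off");
}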
enum pipe {
INVALID_PIPE = -1,
@@ -1130,6 +1171,11 @@ struct intel_l3_parity {
int which_slice;
};
struct i915_gem_batch_pool {
struct drm_device *dev;
struct list_head cache_list;
};
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
@@ -1143,6 +1189,13 @@ struct i915_gem_mm {
*/
struct list_head unbound_list;
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
struct i915_gem_batch_pool batch_pool;
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1307,6 +1360,13 @@ enum drrs_support_type {
SEAMLESS_DRRS_SUPPORT = 2
};
enum psr_lines_to_wait {
PSR_0_LINES_TO_WAIT = 0,
PSR_1_LINE_TO_WAIT,
PSR_4_LINES_TO_WAIT,
PSR_8_LINES_TO_WAIT
};
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1335,11 +1395,21 @@ struct intel_vbt_data {
int edp_bpp;
struct edp_power_seq edp_pps;
struct {
bool full_link;
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
int tp1_wakeup_time;
int tp2_tp3_wakeup_time;
} psr;
struct {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
u8 controller; /* brightness controller number */
} backlight;
/* MIPI DSI */
@@ -1772,6 +1842,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
uint32_t request_uniq;
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1855,6 +1927,8 @@ struct drm_i915_gem_object {
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
struct list_head batch_pool_list;
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
@@ -1926,13 +2000,11 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
struct intel_engine_cs *ring;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
uint32_t last_write_seqno;
struct drm_i915_gem_request *last_read_req;
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
struct drm_i915_gem_request *last_fenced_req;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -1943,10 +2015,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
/** User space pin count and filp owning the pin */
unsigned long user_pin_count;
struct drm_file *pin_filp;
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
@@ -1975,11 +2043,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
* By keeping this list, we can avoid having to do questionable
* sequence-number comparisons on buffer last_rendering_seqnos, and associate
* an emission time with seqnos for tracking how far ahead of the GPU we are.
* By keeping this list, we can avoid having to do questionable sequence
* number comparisons on buffer last_read|write_seqno. It also allows an
* emission time to be associated with the request for tracking how far ahead
* of the GPU the submission is.
*/
struct drm_i915_gem_request {
struct kref ref;
/** On Which ring this request was generated */
struct intel_engine_cs *ring;
@@ -2007,8 +2078,55 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
uint32_t uniq;
};
void i915_gem_request_free(struct kref *req_ref);
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
return req ? req->seqno : 0;
}
static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
return req ? req->ring : NULL;
}
static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
kref_get(&req->ref);
}
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
if (src)
i915_gem_request_reference(src);
if (*pdst)
i915_gem_request_unreference(*pdst);
*pdst = src;
}
/*
* XXX: i915_gem_request_completed should be here but currently needs the
* definition of i915_seqno_passed() which is below. It will be moved in
* a later patch when the call to i915_seqno_passed() is obsoleted...
*/
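
The seqno-to-request conversion running through this series reduces to the pattern below; a hedged sketch with illustrative helper names:

/* Sketch: track the last GPU write with a request instead of a raw
 * seqno ('obj' valid, struct_mutex held). */
static void example_mark_write(struct drm_i915_gem_object *obj,
			       struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req = intel_ring_get_request(ring);

	/* References req and drops the reference on any old request. */
	i915_gem_request_assign(&obj->last_write_req, req);
}

static bool example_write_done(struct drm_i915_gem_object *obj)
{
	if (obj->last_write_req == NULL)
		return true;

	/* Replaces open-coded i915_seqno_passed(get_seqno(ring), seqno). */
	return i915_gem_request_completed(obj->last_write_req, true);
}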
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
@@ -2242,7 +2360,8 @@ struct drm_i915_cmd_table {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2312,6 +2431,7 @@ struct i915_params {
bool disable_vtd_wa;
int use_mmio_flip;
bool mmio_debug;
bool verbose_state_checks;
};
extern struct i915_params i915 __read_mostly;
@@ -2412,10 +2532,6 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -2460,10 +2576,23 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
uint64_t flags,
const struct i915_ggtt_view *view);
static inline
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
uint64_t flags);
uint64_t flags)
{
return i915_gem_object_pin_view(obj, vm, alignment, flags,
&i915_ggtt_view_normal);
}
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2512,6 +2641,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno;
BUG_ON(req == NULL);
seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
}
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
@@ -2527,7 +2668,7 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
@@ -2570,17 +2711,15 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
struct drm_i915_gem_object *batch_obj);
#define i915_add_request(ring) \
__i915_add_request(ring, NULL, NULL)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -2614,18 +2753,51 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
struct i915_address_space *vm,
enum i915_ggtt_view_type view);
static inline
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_address_space *vm)
{
return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
struct i915_address_space *vm,
enum i915_ggtt_view_type view);
static inline
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_address_space *vm)
{
return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
}
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
static inline
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_address_space *vm)
{
return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
}
struct i915_vma *
i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_address_space *vm)
{
return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
&i915_ggtt_view_normal);
}
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
@@ -2807,6 +2979,13 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_gem_batch_pool.c */
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object*
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@ -2814,7 +2993,9 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master);
/* i915_suspend.c */
@@ -2894,9 +3075,6 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
@@ -3072,4 +3250,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
struct drm_i915_gem_request *req)
{
if (ring->trace_irq_req == NULL && ring->irq_get(ring))
i915_gem_request_assign(&ring->trace_irq_req, req);
}
#endif

File diff suppressed because it is too large.

drivers/gpu/drm/i915/i915_gem_batch_pool.c (new file)

@@ -0,0 +1,137 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_drv.h"
/**
* DOC: batch pool
*
* In order to submit batch buffers as 'secure', the software command parser
* must ensure that a batch buffer cannot be modified after parsing. It does
* this by copying the user provided batch buffer contents to a kernel owned
* buffer from which the hardware will actually execute, and by carefully
* managing the address space bindings for such buffers.
*
* The batch pool framework provides a mechanism for the driver to manage a
* set of scratch buffers to use for this purpose. The framework can be
* extended to support other use cases should they arise.
*/
/**
* i915_gem_batch_pool_init() - initialize a batch buffer pool
* @dev: the drm device
* @pool: the batch buffer pool
*/
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool)
{
pool->dev = dev;
INIT_LIST_HEAD(&pool->cache_list);
}
/**
* i915_gem_batch_pool_fini() - clean up a batch buffer pool
* @pool: the pool to clean up
*
* Note: Callers must hold the struct_mutex.
*/
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
while (!list_empty(&pool->cache_list)) {
struct drm_i915_gem_object *obj =
list_first_entry(&pool->cache_list,
struct drm_i915_gem_object,
batch_pool_list);
WARN_ON(obj->active);
list_del_init(&obj->batch_pool_list);
drm_gem_object_unreference(&obj->base);
}
}
/**
* i915_gem_batch_pool_get() - select a buffer from the pool
* @pool: the batch buffer pool
* @size: the minimum desired size of the returned buffer
*
* Finds or allocates a batch buffer in the pool with at least the requested
* size. The caller is responsible for any domain, active/inactive, or
* purgeability management for the returned buffer.
*
* Note: Callers must hold the struct_mutex
*
* Return: the selected batch buffer object
*/
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp, *next;
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
list_for_each_entry_safe(tmp, next,
&pool->cache_list, batch_pool_list) {
if (tmp->active)
continue;
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
list_del(&tmp->batch_pool_list);
drm_gem_object_unreference(&tmp->base);
continue;
}
/*
* Select a buffer that is at least as big as needed
* but not 'too much' bigger. A better way to do this
* might be to bucket the pool objects based on size.
*/
if (tmp->base.size >= size &&
tmp->base.size <= (2 * size)) {
obj = tmp;
break;
}
}
if (!obj) {
obj = i915_gem_alloc_object(pool->dev, size);
if (!obj)
return ERR_PTR(-ENOMEM);
list_add_tail(&obj->batch_pool_list, &pool->cache_list);
}
else
/* Keep list in LRU order */
list_move_tail(&obj->batch_pool_list, &pool->cache_list);
obj->madv = I915_MADV_WILLNEED;
return obj;
}
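
A hedged end-to-end sketch of the pool lifecycle; only the i915_gem_batch_pool_*() calls and the madv handling are from this series, the rest is illustrative. The i915_gem_batch_pool debugfs entry added in i915_debugfs.c above dumps the pool's cache_list for inspection:

/* Sketch: one pool per device; init at load, get under struct_mutex,
 * fini at unload (the fini call is visible in i915_driver_unload()
 * earlier in this diff). */
static int example_batch_pool_use(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *shadow;

	i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);

	mutex_lock(&dev->struct_mutex);
	shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, 4 * 4096);
	if (IS_ERR(shadow)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(shadow);
	}

	/* ... copy and parse a user batch into 'shadow' ... */

	/* Hand it back: marking it purgeable lets the shrinker reclaim
	 * it; i915_gem_batch_pool_get() drops purged entries lazily. */
	shadow->madv = I915_MADV_DONTNEED;

	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}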

drivers/gpu/drm/i915/i915_gem_context.c

@@ -408,9 +408,20 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
BUG_ON(!dev_priv->ring[RCS].default_context);
if (i915.enable_execlists)
return 0;
if (i915.enable_execlists) {
for_each_ring(ring, dev_priv, i) {
if (ring->init_context) {
ret = ring->init_context(ring,
ring->default_context);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
return ret;
}
}
}
} else
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
@@ -611,9 +622,14 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
if (!(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
if (!(vma->bound & GLOBAL_BIND)) {
ret = i915_vma_bind(vma,
to->legacy_hw_ctx.rcs_state->cache_level,
GLOBAL_BIND);
/* This shouldn't ever fail. */
if (WARN_ONCE(ret, "GGTT context bind failed!"))
goto unpin_out;
}
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
@@ -651,7 +667,8 @@ static int do_switch(struct intel_engine_cs *ring,
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
BUG_ON(i915_gem_request_get_ring(
from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -671,10 +688,6 @@ done:
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}
return 0;

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -37,6 +37,7 @@
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
#define __EXEC_OBJECT_PURGEABLE (1<<27)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -223,7 +224,12 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
vma->pin_count--;
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
if (entry->flags & __EXEC_OBJECT_PURGEABLE)
obj->madv = I915_MADV_DONTNEED;
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
__EXEC_OBJECT_HAS_PIN |
__EXEC_OBJECT_PURGEABLE);
}
static void eb_destroy(struct eb_vmas *eb)
@@ -357,9 +363,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!(target_vma->bound & GLOBAL_BIND)))
target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
!(target_vma->bound & GLOBAL_BIND))) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
GLOBAL_BIND);
if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -943,7 +952,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
u32 seqno = intel_ring_get_seqno(ring);
struct drm_i915_gem_request *req = intel_ring_get_request(ring);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -960,7 +969,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = seqno;
i915_gem_request_assign(&obj->last_write_req, req);
intel_fb_obj_invalidate(obj, ring);
@@ -968,7 +977,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
obj->last_fenced_seqno = seqno;
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
@@ -990,7 +999,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
(void)__i915_add_request(ring, file, obj, NULL);
(void)__i915_add_request(ring, file, obj);
}
static int
@@ -1060,6 +1069,65 @@ i915_emit_box(struct intel_engine_cs *ring,
return 0;
}
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master,
u32 *flags)
{
struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
batch_obj->base.size);
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
ret = i915_parse_cmds(ring,
batch_obj,
shadow_batch_obj,
batch_start_offset,
batch_len,
is_master);
if (ret) {
if (ret == -EACCES)
return batch_obj;
} else {
struct i915_vma *vma;
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry;
vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
drm_gem_object_reference(&shadow_batch_obj->base);
list_add_tail(&vma->exec_list, &eb->vmas);
shadow_batch_obj->base.pending_read_domains =
batch_obj->base.pending_read_domains;
/*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE
* bit from MI_BATCH_BUFFER_START commands issued in the
* dispatch_execbuffer implementations. We specifically
* don't want that set when the command parser is
* enabled.
*
* FIXME: with aliasing ppgtt, buffers that should only
* be in ggtt still end up in the aliasing ppgtt. remove
* this check when that is fixed.
*/
if (USES_FULL_PPGTT(dev))
*flags |= I915_DISPATCH_SECURE;
}
return ret ? ERR_PTR(ret) : shadow_batch_obj;
}
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
@@ -1208,7 +1276,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
return ret;
}
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1277,6 +1345,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
@@ -1393,28 +1462,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
if (i915_needs_cmd_parser(ring)) {
ret = i915_parse_cmds(ring,
batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
file->is_master);
if (ret) {
if (ret != -EACCES)
args->batch_len,
file->is_master,
&flags);
if (IS_ERR(batch_obj)) {
ret = PTR_ERR(batch_obj);
goto err;
} else {
/*
* XXX: Actually do this when enabling batch copy...
*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
* from MI_BATCH_BUFFER_START commands issued in the
* dispatch_execbuffer implementations. We specifically don't
* want that set when the command parser is enabled.
*/
}
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */

drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -30,6 +30,68 @@
#include "i915_trace.h"
#include "intel_drv.h"
/**
* DOC: Global GTT views
*
* Background and previous state
*
* Historically objects could exist (be bound) in global GTT space only as
* singular instances with a view representing all of the object's backing pages
* in a linear fashion. This view will be called a normal view.
*
* To support multiple views of the same object, where the number of mapped
* pages is not equal to the backing store, or where the layout of the pages
* is not linear, the concept of a GGTT view was added.
*
* One example of an alternative view is a stereo display driven by a single
* image. In this case we would have a framebuffer looking like this
* (2x2 pages):
*
* 12
* 34
*
* Above would represent a normal GGTT view as normally mapped for GPU or CPU
* rendering. In contrast, fed to the display engine would be an alternative
* view which could look something like this:
*
* 1212
* 3434
*
* In this example both the size and layout of pages in the alternative view are
* different from the normal view.
*
* Implementation and usage
*
* GGTT views are implemented using VMAs and are distinguished via enum
* i915_ggtt_view_type and struct i915_ggtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects was
* added with the _view suffix. They take the struct i915_ggtt_view parameter
* encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
* globally const i915_ggtt_view_normal singleton instance exists. All old core
* GEM API functions, the ones not taking the view parameter, are operating on,
* or with the normal GGTT view.
*
* Code wanting to add or use a new GGTT view needs to:
*
* 1. Add a new enum with a suitable name.
* 2. Extend the metadata in the i915_ggtt_view structure if required.
* 3. Add support to i915_get_vma_pages().
*
* New views are required to build a scatter-gather table from within the
* i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
* exists for the lifetime of a VMA.
*
* Core API is designed to have copy semantics which means that passed in
* struct i915_ggtt_view does not need to be persistent (left around after
* calling the core API functions).
*
*/
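
Before the implementation, a hedged sketch of the new _view API in use; 'alt_view' stands in for a future alternative view type, which this series does not add yet:

/* Sketch: bind one object twice, through the normal view and through
 * an alternative view ('alt_view' is illustrative only). */
static int example_bind_views(struct drm_i915_gem_object *obj,
			      struct i915_address_space *ggtt,
			      const struct i915_ggtt_view *alt_view)
{
	struct i915_vma *vma;

	/* Old-style callers keep working: this wraps the _view variant
	 * with the global &i915_ggtt_view_normal singleton. */
	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* A second VMA for the same object, distinguished by its view;
	 * per the DOC above, i915_get_vma_pages() builds the view's
	 * scatter-gather table when the VMA is bound. */
	vma = i915_gem_obj_lookup_or_create_vma_view(obj, ggtt, alt_view);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	return i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
}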
const struct i915_ggtt_view i915_ggtt_view_normal;
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -40,8 +102,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
if (IS_GEN8(dev))
has_full_ppgtt = false; /* XXX why? */
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
@@ -72,6 +132,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return 0;
}
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
return 2;
else
return has_aliasing_ppgtt ? 1 : 0;
}
@@ -132,7 +195,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
MISSING_CASE(level);
}
return pte;
@@ -156,7 +219,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
MISSING_CASE(level);
}
return pte;
@@ -1102,10 +1165,8 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev) || IS_GEN9(dev))
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
@@ -1146,7 +1207,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
else
WARN_ON(1);
MISSING_CASE(INTEL_INFO(dev)->gen);
if (ppgtt) {
for_each_ring(ring, dev_priv, i) {
@ -1341,9 +1402,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* The bind_vma code tries to be smart about tracking mappings.
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*
* Bind is not expected to fail since this is only called on
* resume and the assumption is that all requirements already exist.
*/
vma->bound &= ~GLOBAL_BIND;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
}
@ -1538,7 +1602,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
vma->bound = GLOBAL_BIND;
}
@ -1588,7 +1652,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
@ -1600,7 +1664,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->ggtt_view.pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
@ -2165,7 +2229,8 @@ int i915_gem_gtt_init(struct drm_device *dev)
}
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
@ -2176,12 +2241,9 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
vma->ggtt_view = *view;
switch (INTEL_INFO(vm->dev)->gen) {
case 9:
case 8:
case 7:
case 6:
if (INTEL_INFO(vm->dev)->gen >= 6) {
if (i915_is_ggtt(vm)) {
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
@ -2189,39 +2251,73 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->unbind_vma = ppgtt_unbind_vma;
vma->bind_vma = ppgtt_bind_vma;
}
break;
case 5:
case 4:
case 3:
case 2:
} else {
BUG_ON(!i915_is_ggtt(vm));
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
break;
default:
BUG();
}
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else {
list_add_tail(&vma->vma_link, &obj->vma_list);
if (!i915_is_ggtt(vm))
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
}
return vma;
}
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
vma = i915_gem_obj_to_vma(obj, vm);
vma = i915_gem_obj_to_vma_view(obj, vm, view);
if (!vma)
vma = __i915_gem_vma_create(obj, vm);
vma = __i915_gem_vma_create(obj, vm, view);
return vma;
}
static inline
int i915_get_vma_pages(struct i915_vma *vma)
{
if (vma->ggtt_view.pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->ggtt_view.pages = vma->obj->pages;
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
if (!vma->ggtt_view.pages) {
DRM_ERROR("Failed to get pages for VMA view type %u!\n",
vma->ggtt_view.type);
return -EINVAL;
}
return 0;
}
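A hypothetical new view type would follow the three steps from the DOC comment above and extend this function along the lines of the sketch below; I915_GGTT_VIEW_EXAMPLE and example_build_sg_table() are invented names, not part of this patch.

static struct sg_table *example_get_view_pages(struct i915_vma *vma)
{
	switch (vma->ggtt_view.type) {
	case I915_GGTT_VIEW_NORMAL:
		return vma->obj->pages;
	case I915_GGTT_VIEW_EXAMPLE:	/* hypothetical new view */
		return example_build_sg_table(vma->obj);
	default:
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);
		return NULL;
	}
}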
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries are set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
int ret = i915_get_vma_pages(vma);
if (ret)
return ret;
vma->bind_vma(vma, cache_level, flags);
return 0;
}

View File

@ -109,7 +109,20 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
};
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
struct sg_table *pages;
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
enum i915_cache_level;
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
@ -129,6 +142,15 @@ struct i915_vma {
#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
* i915_ggtt_view_type is used to distinguish between those entries.
* The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
* functions which take no ggtt view parameter.
*/
struct i915_ggtt_view ggtt_view;
/** This object's place on the active/inactive lists */
struct list_head mm_list;
@ -146,11 +168,10 @@ struct i915_vma {
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
* (via user_pin_count), execbuffer (objects are not allowed multiple
* times for the same batchbuffer), and the framebuffer code. When
* switching/pageflipping, the framebuffer code has at most two buffers
* pinned per crtc.
* users can each hold at most one reference: pwrite/pread, execbuffer
* (objects are not allowed multiple times for the same batchbuffer),
* and the framebuffer code. When switching/pageflipping, the
* framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
@ -182,7 +203,7 @@ struct i915_address_space {
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@ -193,7 +214,7 @@ struct i915_address_space {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
* last_read_req is NULL while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being

View File

@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
ret = __i915_add_request(ring, NULL, so.obj, NULL);
ret = __i915_add_request(ring, NULL, so.obj);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);

View File

@ -399,7 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->last_fenced_req ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;

View File

@ -670,8 +670,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
@ -679,13 +679,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->pinned = 0;
if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->ring = obj->ring ? obj->ring->id : -1;
err->ring = obj->last_read_req ?
i915_gem_request_get_ring(obj->last_read_req)->id : -1;
err->cache_level = obj->cache_level;
}
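The capture code above relies on small accessors from the seqno-to-request conversion; their approximate shape (the real definitions live in i915_drv.h) is:

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}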
@ -719,10 +718,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
break;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm && vma->pin_count > 0) {
if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
break;
}
}
return err - first;
@ -767,32 +764,21 @@ static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 9:
case 8:
case 7:
case 6:
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
if (IS_GEN3(dev) || IS_GEN2(dev)) {
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
default:
BUG();
}
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
(i * 4));
} else if (IS_GEN5(dev) || IS_GEN4(dev))
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_0 +
(i * 8));
else if (INTEL_INFO(dev)->gen >= 6)
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
(i * 8));
}
@ -926,9 +912,13 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
switch (INTEL_INFO(dev)->gen) {
case 9:
case 8:
if (IS_GEN6(dev))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(ring));
else if (IS_GEN7(dev))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(ring));
else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(ring, i));
@ -936,16 +926,6 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(ring, i));
}
break;
case 7:
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(ring));
break;
case 6:
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(ring));
break;
}
}
}
@ -1097,10 +1077,8 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm && vma->pin_count > 0) {
if (vma->vm == vm && vma->pin_count > 0)
i++;
break;
}
}
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@ -1378,26 +1356,15 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
switch (INTEL_INFO(dev)->gen) {
case 2:
case 3:
if (IS_GEN2(dev) || IS_GEN3(dev))
instdone[0] = I915_READ(INSTDONE);
break;
case 4:
case 5:
case 6:
else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
break;
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
case 9:
} else if (INTEL_INFO(dev)->gen >= 7) {
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
break;
}
}

View File

@ -183,6 +183,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@ -229,6 +231,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
WARN_ON(enabled_irq_mask & ~interrupt_mask);
assert_spin_locked(&dev_priv->irq_lock);
new_val = dev_priv->pm_irq_mask;
@ -332,6 +336,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
assert_spin_locked(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@ -1017,7 +1023,7 @@ static void notify_ring(struct drm_device *dev,
if (!intel_ring_initialized(ring))
return;
trace_i915_gem_request_complete(ring);
trace_i915_gem_request_notify(ring);
wake_up_all(&ring->irq_queue);
}
@ -1383,14 +1389,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
intel_lrc_irq_handler(ring);
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@ -1406,14 +1412,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
intel_lrc_irq_handler(ring);
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@ -1440,7 +1446,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
intel_lrc_irq_handler(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@ -2753,18 +2759,18 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_engine_cs *ring)
static struct drm_i915_gem_request *
ring_last_request(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
struct drm_i915_gem_request, list);
}
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
ring_idle(struct intel_engine_cs *ring)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
i915_gem_request_completed(ring_last_request(ring), false));
}
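ring_idle() now defers to the request-based completion check; roughly the following sketch, where lazy_coherency selects the cheaper seqno read:

static inline bool
i915_gem_request_completed(struct drm_i915_gem_request *req,
			   bool lazy_coherency)
{
	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->seqno);
}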
static bool
@ -2984,7 +2990,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
if (ring_idle(ring)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {

View File

@ -35,7 +35,7 @@ struct i915_params i915 __read_mostly = {
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
.enable_execlists = 0,
.enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = 0,
@ -51,6 +51,7 @@ struct i915_params i915 __read_mostly = {
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
};
module_param_named(modeset, i915.modeset, int, 0400);
@ -122,7 +123,7 @@ MODULE_PARM_DESC(enable_ppgtt,
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
"(-1=auto, 0=disabled [default], 1=enabled)");
"(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
@ -173,3 +174,7 @@ module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");

View File

@ -31,6 +31,8 @@
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
(port) == PORT_B ? (b) : (c))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
@ -217,6 +219,8 @@
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
#define INSTR_26_TO_24_MASK 0x7000000
#define INSTR_26_TO_24_SHIFT 24
/*
* Memory interface instructions used by the kernel
@ -246,6 +250,7 @@
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_SET_APPID MI_INSTR(0x0e, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@ -303,8 +308,9 @@
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
@ -470,6 +476,7 @@
*/
#define BCS_SWCTRL 0x22200
#define GPGPU_THREADS_DISPATCHED 0x2290
#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define IA_VERTICES_COUNT 0x2310
@ -1509,7 +1516,7 @@ enum punit_power_well {
#define I915_ISP_INTERRUPT (1<<22)
#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
#define I915_MIPIB_INTERRUPT (1<<19)
#define I915_MIPIC_INTERRUPT (1<<19)
#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
@ -2539,6 +2546,42 @@ enum punit_power_well {
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
#define VLV_EDP_PSR_ENABLE (1<<0)
#define VLV_EDP_PSR_RESET (1<<1)
#define VLV_EDP_PSR_MODE_MASK (7<<2)
#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
#define VLV_EDP_PSR_DBL_FRAME (1<<10)
#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
#define VLV_EDP_PSR_DISABLED (0<<0)
#define VLV_EDP_PSR_INACTIVE (1<<0)
#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
#define VLV_EDP_PSR_EXIT (5<<0)
#define VLV_EDP_PSR_IN_TRANS (1<<7)
#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
@ -2762,7 +2805,8 @@ enum punit_power_well {
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
@ -6006,6 +6050,10 @@ enum punit_power_well {
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
#define PIXEL_OVERLAP_CNT_SHIFT 30
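PIXEL_OVERLAP_CNT is part of the dual-link DSI prep work; programming it would look roughly like the following sketch, where intel_dsi->pixel_overlap stands in for the VBT-derived value (see the mipi_config change below):

u32 tmp = I915_READ(VLV_CHICKEN_3);
tmp &= ~PIXEL_OVERLAP_CNT_MASK;
tmp |= intel_dsi->pixel_overlap << PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, tmp);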
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
#define GEN6_PMIIR 0x44028
@ -6631,29 +6679,31 @@ enum punit_power_well {
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
/* VLV MIPI registers */
/* MIPI DSI registers */
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
_MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
#define DUAL_LINK_MODE_SHIFT 26
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
#define DITHERING_ENABLE (1 << 25) /* A + B */
#define DITHERING_ENABLE (1 << 25) /* A + C */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
@ -6662,10 +6712,10 @@ enum punit_power_well {
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
#define TEARING_EFFECT_SHIFT 2 /* A + B */
#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
#define TEARING_EFFECT_SHIFT 2 /* A + C */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
@ -6677,9 +6727,9 @@ enum punit_power_well {
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
_MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \
_MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
@ -6689,9 +6739,9 @@ enum punit_power_well {
/* MIPI DSI Controller and D-PHY registers */
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
_MIPIB_DEVICE_READY)
#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
_MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@ -6700,13 +6750,13 @@ enum punit_power_well {
#define DEVICE_READY (1 << 0)
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
_MIPIB_INTR_STAT)
#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \
_MIPIC_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
_MIPIB_INTR_EN)
#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \
_MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@ -6741,9 +6791,9 @@ enum punit_power_well {
#define RXSOT_ERROR (1 << 0)
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
_MIPIB_DSI_FUNC_PRG)
#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
_MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@ -6765,93 +6815,93 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_MASK (7 << 0)
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
_MIPIB_HS_TX_TIMEOUT)
#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
_MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
_MIPIB_LP_RX_TIMEOUT)
#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
_MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
_MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \
_MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
_MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
_MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
_MIPIB_DPI_RESOLUTION)
#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
_MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
_MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \
_MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
_MIPIB_HBP_COUNT)
#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
_MIPIC_HBP_COUNT)
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
_MIPIB_HFP_COUNT)
#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
_MIPIC_HFP_COUNT)
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
_MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
_MIPIB_VBP_COUNT)
#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
_MIPIC_VBP_COUNT)
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
_MIPIB_VFP_COUNT)
#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
_MIPIC_VFP_COUNT)
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
_MIPIB_DPI_CONTROL)
#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
_MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@ -6861,30 +6911,30 @@ enum punit_power_well {
#define SHUTDOWN (1 << 0)
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
_MIPIB_DPI_DATA)
#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \
_MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
_MIPIB_INIT_COUNT)
#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
_MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
_MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \
_MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@ -6893,9 +6943,9 @@ enum punit_power_well {
#define VIDEO_MODE_BURST (3 << 0)
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
_MIPIB_EOT_DISABLE)
#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
_MIPIC_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@ -6906,32 +6956,32 @@ enum punit_power_well {
#define EOT_DISABLE (1 << 0)
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
_MIPIB_LP_BYTECLK)
#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
_MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
_MIPIB_LP_GEN_DATA)
#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
_MIPIC_LP_GEN_DATA)
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
_MIPIB_HS_GEN_DATA)
#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
_MIPIC_HS_GEN_DATA)
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
_MIPIB_LP_GEN_CTRL)
#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
_MIPIC_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
_MIPIB_HS_GEN_CTRL)
#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
_MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@ -6943,9 +6993,9 @@ enum punit_power_well {
/* data type values, see include/video/mipi_display.h */
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
_MIPIB_GEN_FIFO_STAT)
#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
_MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@ -6962,17 +7012,17 @@ enum punit_power_well {
#define HS_DATA_FIFO_FULL (1 << 0)
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
_MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \
_MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
_MIPIB_DPHY_PARAM)
#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
_MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@ -6984,36 +7034,36 @@ enum punit_power_well {
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
_MIPIB_DBI_BW_CTRL)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
_MIPIC_DBI_BW_CTRL)
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
_MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \
_MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
_MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \
_MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
_MIPIB_INTR_EN_REG_1)
#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
_MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
@ -7032,9 +7082,9 @@ enum punit_power_well {
/* MIPI adapter registers */
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
_MIPIB_CTRL)
#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \
_MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@ -7047,24 +7097,24 @@ enum punit_power_well {
#define RGB_FLIP_TO_BGR (1 << 2)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
_MIPIB_DATA_ADDRESS)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
_MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
_MIPIB_DATA_LENGTH)
#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
_MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
_MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \
_MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
@ -7072,22 +7122,22 @@ enum punit_power_well {
#define COMMAND_VALID (1 << 0)
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
_MIPIB_COMMAND_LENGTH)
#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
_MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
#define MIPI_READ_DATA_RETURN(tc, n) \
(_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
#define MIPI_READ_DATA_RETURN(port, n) \
(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
_MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \
_MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */

View File

@ -264,7 +264,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on platforms that support FBC */
intel_disable_fbc(dev);
intel_fbc_disable(dev);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))

View File

@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
u32 seqno),
TP_ARGS(from, to, seqno),
struct drm_i915_gem_request *req),
TP_ARGS(from, to, req),
TP_STRUCT__entry(
__field(u32, dev)
@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
__entry->seqno = seqno;
__entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
);
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
i915_trace_irq_get(ring, seqno);
i915_trace_irq_get(ring, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@ -398,31 +400,36 @@ TRACE_EVENT(i915_gem_ring_flush,
);
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, uniq)
__field(u32, seqno)
),
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->uniq = req ? req->uniq : 0;
__entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->uniq,
__entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_complete,
TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
@ -443,17 +450,23 @@ TRACE_EVENT(i915_gem_request_complete,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, uniq)
__field(u32, seqno)
__field(bool, blocking)
),
@ -465,20 +478,24 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
__entry->uniq = req ? req->uniq : 0;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
mutex_is_locked(&ring->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
__entry->dev, __entry->ring, __entry->seqno,
__entry->blocking ? "yes (NB)" : "no")
TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
__entry->dev, __entry->ring, __entry->uniq,
__entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
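With the event class now keyed on the request instead of a (ring, seqno) pair, call sites shrink accordingly; a sketch of the before and after:

/* before: trace_i915_gem_request_add(ring, request->seqno); */
trace_i915_gem_request_add(request);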
DECLARE_EVENT_CLASS(i915_ring,

View File

@ -314,6 +314,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
const struct bdb_lfp_backlight_control_data *bl_ctrl_data;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
@ -326,6 +327,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
entry = &backlight_data->data[panel_type];
bl_ctrl_data = &backlight_data->blc_ctl[panel_type];
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
@ -337,12 +339,30 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
dev_priv->vbt.backlight.controller = 0;
if (bdb->version >= 191) {
dev_priv->vbt.backlight.present =
bl_ctrl_data->pin == BLC_CONTROL_PIN_DDI;
if (!dev_priv->vbt.backlight.present) {
DRM_DEBUG_KMS("BL control pin is not DDI (pin %u)\n",
bl_ctrl_data->pin);
return;
}
if (bl_ctrl_data->controller == 1)
dev_priv->vbt.backlight.controller =
bl_ctrl_data->controller;
}
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
DRM_DEBUG_KMS("VBT BL controller %u\n",
dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@ -664,6 +684,50 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
}
}
static void
parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_psr *psr;
struct psr_table *psr_table;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
DRM_DEBUG_KMS("No PSR BDB found.\n");
return;
}
psr_table = &psr->psr_table[panel_type];
dev_priv->vbt.psr.full_link = psr_table->full_link;
dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
/* Allowed VBT values go from 0 to 15 */
dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
switch (psr_table->lines_to_wait) {
case 0:
dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
break;
case 1:
dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
break;
case 2:
dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
break;
case 3:
dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
break;
default:
DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
psr_table->lines_to_wait);
break;
}
dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
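Per the VBT layout below, the TP wake-up times are stored in multiples of 100; a consumer converting to microseconds might do the following (hypothetical, assuming the unit is 100 us):

/* Hypothetical conversion, assuming the VBT unit is 100 us. */
u32 tp1_wakeup_us = dev_priv->vbt.psr.tp1_wakeup_time * 100;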
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
@ -1241,6 +1305,7 @@ intel_parse_bios(struct drm_device *dev)
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);

View File

@ -80,7 +80,7 @@ struct vbios_data {
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
#define BDB_DOT_CLOCK_TABLE 9
#define BDB_PSR 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
@ -402,10 +402,21 @@ struct bdb_lfp_backlight_data_entry {
u8 obsolete3;
} __packed;
#define BLC_CONTROL_PIN_PMIC 0
#define BLC_CONTROL_PIN_LPSS_PWM 1
#define BLC_CONTROL_PIN_DDI 2
#define BLC_CONTROL_PIN_CABC 3
struct bdb_lfp_backlight_control_data {
u8 controller:4;
u8 pin:4;
} __packed;
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
struct bdb_lfp_backlight_control_data blc_ctl[16];
} __packed;
struct aimdb_header {
@ -556,6 +567,26 @@ struct bdb_edp {
u16 edp_t3_optimization;
} __packed;
struct psr_table {
/* Feature bits */
u8 full_link:1;
u8 require_aux_to_wakeup:1;
u8 feature_bits_rsvd:6;
/* Wait times */
u8 idle_frames:4;
u8 lines_to_wait:3;
u8 wait_times_rsvd:1;
/* TP wake up time in multiples of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
} __packed;
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
@ -798,7 +829,8 @@ struct mipi_config {
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
u16 rsvd3:12;
u16 pixel_overlap:3;
u16 rsvd3:9;
u16 rsvd4;

View File

@ -128,15 +128,15 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
};
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00000018, 0x000000a0 },
{ 0x00004014, 0x00000098 },
{ 0x00000018, 0x000000a2 },
{ 0x00004014, 0x0000009B },
{ 0x00006012, 0x00000088 },
{ 0x00008010, 0x00000080 },
{ 0x00000018, 0x00000098 },
{ 0x00008010, 0x00000087 },
{ 0x00000018, 0x0000009B },
{ 0x00004014, 0x00000088 },
{ 0x00006012, 0x00000080 },
{ 0x00006012, 0x00000087 },
{ 0x00000018, 0x00000088 },
{ 0x00004014, 0x00000080 },
{ 0x00004014, 0x00000087 },
};
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
@ -834,7 +834,12 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else
skl_ddi_clock_get(encoder, pipe_config);
}
static void
@ -2029,7 +2034,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@ -2106,10 +2110,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else
skl_ddi_clock_get(encoder, pipe_config);
intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)

File diff suppressed because it is too large

View File

@ -1558,7 +1558,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
WARN(!vdd, "eDP port %c VDD already requested on\n",
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->port));
}
@ -1642,7 +1642,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!is_edp(intel_dp))
return;
WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->port));
intel_dp->want_panel_vdd = false;
@ -2105,6 +2105,9 @@ static void intel_disable_dp(struct intel_encoder *encoder)
if (crtc->config.has_audio)
intel_audio_codec_disable(encoder);
if (HAS_PSR(dev) && !HAS_DDI(dev))
intel_psr_disable(intel_dp);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
@ -2329,6 +2332,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@ -4306,7 +4310,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
@ -4322,6 +4325,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp->edp_notifier.notifier_call = NULL;
}
}
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
@ -4763,14 +4767,9 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
}
/*
* FIXME: This needs proper synchronization with psr state. But really
* hard to tell without seeing the user of this function of this code.
* Check locking and ordering once that lands.
* FIXME: This needs proper synchronization with psr state for some
* platforms that cannot have PSR and DRRS enabled at the same time.
*/
if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
encoder = intel_attached_encoder(&intel_connector->base);
intel_dp = enc_to_intel_dp(&encoder->base);
@ -5086,7 +5085,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
/* init MST on ports that can support it */
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);

View File

@ -244,8 +244,7 @@ typedef struct dpll {
} intel_clock_t;
struct intel_plane_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_plane_state base;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
@ -406,8 +405,7 @@ struct intel_pipe_wm {
};
struct intel_mmio_flip {
u32 seqno;
struct intel_engine_cs *ring;
struct drm_i915_gem_request *req;
struct work_struct work;
};
@ -510,6 +508,10 @@ struct intel_plane {
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@ -708,8 +710,7 @@ struct intel_unpin_work {
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct intel_engine_cs *flip_queued_ring;
u32 flip_queued_seqno;
struct drm_i915_gem_request *flip_queued_req;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check;
@ -874,7 +875,6 @@ void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
@ -925,6 +925,10 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
void intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@ -1010,6 +1014,12 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
int intel_disable_plane(struct drm_plane *plane);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@ -1053,6 +1063,13 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
}
#endif
/* intel_fbc.c */
bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@ -1083,6 +1100,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
@ -1115,7 +1133,6 @@ void intel_backlight_unregister(struct drm_device *dev);
/* intel_psr.c */
bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
@ -1159,8 +1176,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_device *dev);
@ -1191,7 +1206,6 @@ int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,

View File

@ -102,11 +102,62 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
return true;
}
static void intel_dsi_port_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK;
temp |= intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, temp);
}
for_each_dsi_port(port, intel_dsi->ports) {
temp = I915_READ(MIPI_PORT_CTRL(port));
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
/* assert ip_tg_enable signal */
I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(port));
}
}
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
for_each_dsi_port(port, intel_dsi->ports) {
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(port));
I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(port));
}
}
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
@ -120,19 +171,27 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
/* bandgap reset is needed every time we power gate */
band_gap_reset(dev_priv);
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
usleep_range(2500, 3000);
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
val = I915_READ(MIPI_PORT_CTRL(port));
/* Enable the MIPI PHY transparent latch. The bit is common to
* both MIPI Port A and MIPI Port C, but it lives only in the
* Port A register; Port C has no similar bit.
*/
I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
usleep_range(2500, 3000);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
usleep_range(2500, 3000);
}
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
@ -140,13 +199,12 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;
enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe);
DRM_DEBUG_KMS("\n");
if (is_cmd_mode(intel_dsi))
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
@ -157,11 +215,7 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
wait_for_dsi_fifo_empty(intel_dsi);
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
intel_dsi_port_enable(encoder);
}
}
@ -235,9 +289,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
enum port port;
u32 temp;
DRM_DEBUG_KMS("\n");
@ -245,31 +298,28 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
if (is_vid_mode(intel_dsi)) {
wait_for_dsi_fifo_empty(intel_dsi);
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
intel_dsi_port_disable(encoder);
msleep(2);
}
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
temp = I915_READ(MIPI_CTRL(pipe));
temp = I915_READ(MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(pipe), temp |
I915_WRITE(MIPI_CTRL(port), temp |
intel_dsi->escape_clk_div <<
ESCAPE_CLOCK_DIVIDER_SHIFT);
I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
temp &= ~VID_MODE_FORMAT_MASK;
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
/* if the disable packets are sent before the shutdown packet, a
* "turn on" packet error is observed during some subsequent enable
* sequence */
if (intel_dsi->dev.dev_ops->disable)
@ -281,31 +331,42 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
/* Wait until the clock lanes are in the LP-00 state. This can
* only be checked on MIPI Port A; Port C has no similar bit.
*/
if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
val = I915_READ(MIPI_PORT_CTRL(port));
/* Disable MIPI PHY transparent latch
* Common bit for both MIPI Port A & MIPI Port C
*/
I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
vlv_disable_dsi_pll(encoder);
}
@ -337,9 +398,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
u32 dpi_enabled, func;
enum port port;
DRM_DEBUG_KMS("\n");
@ -348,13 +411,23 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
return false;
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
func = I915_READ(MIPI_DSI_FUNC_PRG(p));
for_each_dsi_port(port, intel_dsi->ports) {
func = I915_READ(MIPI_DSI_FUNC_PRG(port));
dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) &
DPI_ENABLE;
if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
*pipe = p;
/* Due to hardware limitations on BYT, the MIPI Port C DPI
* Enable bit does not get set. To check whether DSI Port C was
* enabled in the BIOS, check the Pipe B enable bit instead.
*/
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
(port == PORT_C))
dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
PIPECONF_ENABLE;
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
return true;
}
}
@ -437,7 +510,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pipe = intel_crtc->pipe;
enum port port;
unsigned int bpp = intel_crtc->config.pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
@ -448,6 +521,15 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
if (intel_dsi->dual_link) {
hactive /= 2;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
hactive += intel_dsi->pixel_overlap;
hfp /= 2;
hsync /= 2;
hbp /= 2;
}
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
@ -460,18 +542,20 @@ static void set_dsi_timings(struct drm_encoder *encoder,
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
I915_WRITE(MIPI_HFP_COUNT(port), hfp);
/* meaningful for video mode non-burst sync pulse mode only, can be zero
* for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
/* meaningful only for video mode with non-burst sync pulses;
* can be zero for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
I915_WRITE(MIPI_HBP_COUNT(port), hbp);
/* vertical values are in terms of lines */
I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
I915_WRITE(MIPI_VFP_COUNT(port), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
I915_WRITE(MIPI_VBP_COUNT(port), vbp);
}
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
@ -483,32 +567,43 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
int pipe = intel_crtc->pipe;
enum port port;
unsigned int bpp = intel_crtc->config.pipe_bpp;
u32 val, tmp;
u16 mode_hdisplay;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
mode_hdisplay = adjusted_mode->hdisplay;
if (intel_dsi->dual_link) {
mode_hdisplay /= 2;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
mode_hdisplay += intel_dsi->pixel_overlap;
}
for_each_dsi_port(port, intel_dsi->ports) {
/* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(pipe));
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH);
/* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
I915_WRITE(MIPI_DPI_RESOLUTION(port),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
set_dsi_timings(encoder, adjusted_mode);
@ -522,96 +617,103 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
/* timeouts for recovery. one frame IIUC. if counter expires, EOT and
* stop state. */
tmp = 0;
if (intel_dsi->eotp_pkt == 0)
tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
/*
* In burst mode, value greater than one DPI line Time in byte clock
* (txbyteclkhs) To timeout this timer 1+ of the above said value is
* recommended.
* In burst mode, the value must be greater than one DPI line
* time in byte clocks (txbyteclkhs); programming 1 more than
* that value is recommended so the timer can expire.
*
* In non-burst mode, Value greater than one DPI frame time in byte
* clock(txbyteclkhs) To timeout this timer 1+ of the above said value
* is recommended.
* In non-burst mode, the value must be greater than one DPI
* frame time in byte clocks (txbyteclkhs); programming 1 more
* than that value is recommended.
*
* In DBI only mode, value greater than one DBI frame time in byte
* clock(txbyteclkhs) To timeout this timer 1+ of the above said value
* is recommended.
* In DBI only mode, the value must be greater than one DBI
* frame time in byte clocks (txbyteclkhs); programming 1 more
* than that value is recommended.
*/
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
txbyteclkhs(adjusted_mode->htotal, bpp,
intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
bpp, intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
val = 0;
if (intel_dsi->eotp_pkt == 0)
val |= EOT_DISABLE;
if (intel_dsi->clock_stop)
val |= CLOCKSTOP;
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
I915_WRITE(MIPI_EOT_DISABLE(port), val);
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
/* XXX: low power clock equivalence in terms of byte clock.
* the number of byte clocks occupied in one low power clock.
* based on txbyteclkhs and txclkesc.
* txclkesc time / txbyteclk time *
* (105 + MIPI_STOP_STATE_STALL) / 105.???
*/
I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
/* the bw essential for transmitting 16 long packets containing 252
* bytes meant for dcs write memory command is programmed in this
* register in terms of byte clocks. based on dsi transfer rate and the
* number of lanes configured the time taken to transmit 16 long packets
* in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
/* the bw essential for transmitting 16 long packets containing
* 252 bytes meant for dcs write memory command is programmed in
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
/* Some panels might have resolution which is not a multiple of
* 64 like 1366 x 768. Enable RANDOM resolution support for such
* panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
/* Some panels might have resolution which is not a
* multiple of 64 like 1366 x 768. Enable RANDOM
* resolution support for such panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
intel_dsi->video_frmt_cfg_bits |
intel_dsi->video_mode_format |
IP_TG_CONFIG |
RANDOM_DPI_DISPLAY_RESOLUTION);
}
}
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
@ -748,6 +850,15 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
intel_encoder->crtc_mask = (1 << PIPE_A);
intel_dsi->ports = (1 << PORT_A);
} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
intel_encoder->crtc_mask = (1 << PIPE_B);
intel_dsi->ports = (1 << PORT_C);
}
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
intel_dsi->dev = *dsi;
@ -762,8 +873,6 @@ void intel_dsi_init(struct drm_device *dev)
}
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);

View File

@ -28,6 +28,11 @@
#include <drm/drm_crtc.h>
#include "intel_drv.h"
/* Dual Link support */
#define DSI_DUAL_LINK_NONE 0
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
@ -78,6 +83,9 @@ struct intel_dsi {
struct intel_connector *attached_connector;
/* bit mask of ports being driven */
u16 ports;
/* if true, use HS mode, otherwise LP */
bool hs;
@ -101,6 +109,8 @@ struct intel_dsi {
u8 clock_stop;
u8 escape_clk_div;
u8 dual_link;
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
@ -127,6 +137,22 @@ struct intel_dsi {
u16 panel_pwr_cycle_delay;
};
/* XXX: Transitional before dual port configuration */
static inline enum port intel_dsi_pipe_to_port(enum pipe pipe)
{
if (pipe == PIPE_A)
return PORT_A;
else if (pipe == PIPE_B)
return PORT_C;
WARN(1, "DSI on pipe %c, assuming port C\n", pipe_name(pipe));
return PORT_C;
}
#define for_each_dsi_port(__port, __ports_mask) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
if ((__ports_mask) & (1 << (__port)))
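/* A minimal usage sketch (the register write is illustrative): walk
 * every port in the bitmask, e.g. both PORT_A and PORT_C for a
 * dual-link panel:
 *
 *	enum port port;
 *
 *	for_each_dsi_port(port, intel_dsi->ports)
 *		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
 */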
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);

View File

@ -48,21 +48,19 @@
* For memory writes, these should probably be used for performance.
*/
static void print_stat(struct intel_dsi *intel_dsi)
static void print_stat(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 val;
val = I915_READ(MIPI_INTR_STAT(pipe));
val = I915_READ(MIPI_INTR_STAT(port));
#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
DRM_DEBUG_KMS("MIPI_INTR_STAT(%c) = %08x"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
"\n", pipe, val,
"\n", port_name(port), val,
STAT_BIT(val, TEARING_EFFECT),
STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
STAT_BIT(val, GEN_READ_DATA_AVAIL),
@ -104,34 +102,31 @@ enum dsi_type {
};
/* enable or disable command mode hs transmissions */
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 temp;
u32 mask = DBI_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(port));
temp &= DBI_HS_LP_MODE_MASK;
I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
I915_WRITE(MIPI_HS_LP_DBI_ENABLE(port), enable ? DBI_HS_MODE : DBI_LP_MODE);
intel_dsi->hs = enable;
}
static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
u8 data_type, u16 data)
u8 data_type, u16 data, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 ctrl_reg;
u32 ctrl;
u32 mask;
@ -140,16 +135,16 @@ static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
channel, data_type, data);
if (intel_dsi->hs) {
ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
ctrl_reg = MIPI_HS_GEN_CTRL(port);
mask = HS_CTRL_FIFO_FULL;
} else {
ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
ctrl_reg = MIPI_LP_GEN_CTRL(port);
mask = LP_CTRL_FIFO_FULL;
}
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
print_stat(intel_dsi);
print_stat(intel_dsi, port);
}
/*
@ -167,13 +162,11 @@ static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
}
static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
u8 data_type, const u8 *data, int len)
u8 data_type, const u8 *data, int len, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 data_reg;
int i, j, n;
u32 mask;
@ -182,14 +175,14 @@ static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
channel, data_type, len);
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
data_reg = MIPI_HS_GEN_DATA(port);
mask = HS_DATA_FIFO_FULL;
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
data_reg = MIPI_LP_GEN_DATA(port);
mask = LP_DATA_FIFO_FULL;
}
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
for (i = 0; i < len; i += n) {
@ -204,12 +197,12 @@ static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
* dwords, then wait for not set, then continue. */
}
return dsi_vc_send_short(intel_dsi, channel, data_type, len);
return dsi_vc_send_short(intel_dsi, channel, data_type, len, port);
}
static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
int channel, const u8 *data, int len,
enum dsi_type type)
enum dsi_type type, enum port port)
{
int ret;
@ -217,50 +210,54 @@ static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
BUG_ON(type == DSI_GENERIC);
ret = dsi_vc_send_short(intel_dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
0);
0, port);
} else if (len == 1) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0]);
MIPI_DSI_DCS_SHORT_WRITE, data[0],
port);
} else if (len == 2) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
(data[1] << 8) | data[0]);
(data[1] << 8) | data[0], port);
} else {
ret = dsi_vc_send_long(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len);
MIPI_DSI_DCS_LONG_WRITE, data, len,
port);
}
return ret;
}
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
const u8 *data, int len, enum port port)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS,
port);
}
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
const u8 *data, int len, enum port port)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC,
port);
}
static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
int channel, u8 dcs_cmd, enum port port)
{
return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
dcs_cmd);
dcs_cmd, port);
}
static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 *reqdata,
int reqlen)
int reqlen, enum port port)
{
u16 data;
u8 data_type;
@ -282,24 +279,22 @@ static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
BUG();
}
return dsi_vc_send_short(intel_dsi, channel, data_type, data);
return dsi_vc_send_short(intel_dsi, channel, data_type, data, port);
}
static int dsi_read_data_return(struct intel_dsi *intel_dsi,
u8 *buf, int buflen)
u8 *buf, int buflen, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
int i, len = 0;
u32 data_reg, val;
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
data_reg = MIPI_HS_GEN_DATA(port);
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
data_reg = MIPI_LP_GEN_DATA(port);
}
while (len < buflen) {
@ -312,13 +307,11 @@ static int dsi_read_data_return(struct intel_dsi *intel_dsi,
}
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
u8 *buf, int buflen, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
@ -327,17 +320,17 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd, port);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
ret = dsi_read_data_return(intel_dsi, buf, buflen);
ret = dsi_read_data_return(intel_dsi, buf, buflen, port);
if (ret < 0)
return ret;
@ -348,13 +341,11 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
}
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
@ -363,18 +354,18 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
reqlen);
reqlen, port);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
ret = dsi_read_data_return(intel_dsi, buf, buflen);
ret = dsi_read_data_return(intel_dsi, buf, buflen, port);
if (ret < 0)
return ret;
@ -394,8 +385,7 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 mask;
/* XXX: pipe, hs */
@ -404,18 +394,23 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
else
cmd |= DPI_LP_MODE;
for_each_dsi_port(port, intel_dsi->ports) {
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
DRM_ERROR("Same special packet %02x twice in a row.\n",
cmd);
I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask,
100))
DRM_ERROR("Video mode command 0x%08x send failed.\n",
cmd);
}
return 0;
}
@ -426,12 +421,12 @@ void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}

View File

@ -36,77 +36,81 @@
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
enum port port);
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
const u8 *data, int len, enum port port);
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
const u8 *data, int len, enum port port);
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen);
u8 *buf, int buflen, enum port port);
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
int channel, u8 dcs_cmd, enum port port)
{
return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1, port);
}
static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd, u8 param)
int channel, u8 dcs_cmd, u8 param, enum port port)
{
u8 buf[2] = { dcs_cmd, param };
return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
return dsi_vc_dcs_write(intel_dsi, channel, buf, 2, port);
}
static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
int channel)
int channel, enum port port)
{
return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
return dsi_vc_generic_write(intel_dsi, channel, NULL, 0, port);
}
static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
int channel, u8 param)
int channel, u8 param, enum port port)
{
return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
return dsi_vc_generic_write(intel_dsi, channel, &param, 1, port);
}
static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2)
int channel, u8 param1, u8 param2, enum port port)
{
u8 buf[2] = { param1, param2 };
return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
return dsi_vc_generic_write(intel_dsi, channel, buf, 2, port);
}
/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
int channel, u8 *buf, int buflen)
int channel, u8 *buf, int buflen, enum port port)
{
return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen,
port);
}
static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
int channel, u8 param, u8 *buf,
int buflen)
int buflen, enum port port)
{
return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen,
port);
}
static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2,
u8 *buf, int buflen)
u8 *buf, int buflen, enum port port)
{
u8 req[2] = { param1, param2 };
return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen,
port);
}
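/* With the new signatures, callers pass the destination port
 * explicitly. A minimal, hypothetical caller sending a DCS
 * set_display_on to every configured port might look like:
 *
 *	enum port port;
 *
 *	for_each_dsi_port(port, intel_dsi->ports)
 *		dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON,
 *				   port);
 */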

View File

@ -94,16 +94,31 @@ static struct gpio_table gtable[] = {
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
}
static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
{
u8 type, byte, mode, vc, port;
u8 type, byte, mode, vc, seq_port;
u16 len;
enum port port;
byte = *data++;
mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
port = (byte >> MIPI_PORT_SHIFT) & 0x3;
seq_port = (byte >> MIPI_PORT_SHIFT) & 0x3;
/* For a DSI single link on Port A or Port C, the seq_port value
* parsed from Sequence Block #53 of the VBT is always 0, so
* reads/writes of packets are instead based on the DVO port from
* VBT block 2.
*/
if (intel_dsi->ports == (1 << PORT_C))
port = PORT_C;
else
port = intel_dsi_seq_port_to_port(seq_port);
/* LP or HS mode */
intel_dsi->hs = mode;
@ -115,13 +130,13 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
dsi_vc_generic_write_0(intel_dsi, vc);
dsi_vc_generic_write_0(intel_dsi, vc, port);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
dsi_vc_generic_write_1(intel_dsi, vc, *data);
dsi_vc_generic_write_1(intel_dsi, vc, *data, port);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1), port);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
@ -129,19 +144,19 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
dsi_vc_generic_write(intel_dsi, vc, data, len);
dsi_vc_generic_write(intel_dsi, vc, data, len, port);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
dsi_vc_dcs_write_0(intel_dsi, vc, *data);
dsi_vc_dcs_write_0(intel_dsi, vc, *data, port);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1), port);
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
dsi_vc_dcs_write(intel_dsi, vc, data, len, port);
break;
}
@ -280,6 +295,11 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
if (intel_dsi->dual_link)
intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
@ -299,6 +319,20 @@ static bool generic_init(struct intel_dsi_device *dsi)
pclk = mode->clock;
/* In dual link mode each port needs half of pixel clock */
if (intel_dsi->dual_link) {
pclk = pclk / 2;
/* pixel_overlap can be enabled if the panel needs it; in that
* case the pixel clock must be increased for the extra pixels
*/
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
pclk += DIV_ROUND_UP(mode->vtotal *
intel_dsi->pixel_overlap *
60, 1000);
}
}
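/* A worked example, with illustrative numbers only: a 1920x1080
 * mode with vtotal 1125 and a 148500 kHz pixel clock, using
 * front-back dual link with pixel_overlap = 4:
 *   pclk = 148500 / 2 = 74250
 *   pclk += DIV_ROUND_UP(1125 * 4 * 60, 1000) = 74250 + 270
 * so each link is driven at 74520 kHz.
 */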
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
@ -493,6 +527,12 @@ static bool generic_init(struct intel_dsi_device *dsi)
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
DRM_DEBUG_KMS("Dual link: NONE\n");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);

View File

@ -241,8 +241,12 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
return;
}
if (intel_dsi->ports & (1 << PORT_A))
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
if (intel_dsi->ports & (1 << PORT_C))
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
@ -269,12 +273,14 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
mutex_unlock(&dev_priv->dpio_lock);
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
mutex_unlock(&dev_priv->dpio_lock);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
mutex_unlock(&dev_priv->dpio_lock);
DRM_DEBUG_KMS("DSI PLL locked\n");
}

View File

@ -0,0 +1,701 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* DOC: Frame Buffer Compression (FBC)
*
* FBC tries to save memory bandwidth (and so power consumption) by
* compressing the amount of memory used by the display. It is totally
* transparent to user space and is handled entirely in the kernel.
*
* The benefits of FBC are mostly visible with solid backgrounds and
* variation-less patterns. The benefit comes from keeping the memory
* footprint small and having fewer memory pages opened and accessed for
* refreshing the display.
*
* i915 is responsible for reserving stolen memory for FBC and for
* configuring its offset in the proper registers. The hardware takes
* care of all the compression/decompression. However, there are many
* known cases where we have to forcibly disable it to allow proper
* screen updates.
*/
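/*
 * A sketch of the expected call flow for this file's public API (the
 * call sites below are illustrative, not a new interface):
 *
 *	intel_fbc_init(dev_priv);	at driver initialization
 *	intel_fbc_update(dev);		after modesets and plane updates
 *	if (intel_fbc_enabled(dev))	query the current state
 *		...;
 *	intel_fbc_disable(dev);		when FBC must be turned off
 */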
#include "intel_drv.h"
#include "i915_drv.h"
static void i8xx_fbc_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
dev_priv->fbc.enabled = false;
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_fbc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
u32 fbc_ctl;
dev_priv->fbc.enabled = true;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
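/* For example, a 2048-byte fb pitch programs (2048 / 32) - 1 = 63
 * on gen2, and (2048 / 64) - 1 = 31 on later platforms. */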
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
if (IS_GEN4(dev)) {
u32 fbc_ctl2;
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_fbc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_fbc_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void snb_fbc_blit_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
/* The blitter is part of the Media power well on VLV. This
* parameter has no impact on other platforms for now. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
snb_fbc_blit_update(dev);
}
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ilk_fbc_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
static bool ilk_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_fbc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
HSW_FBCQ_DIS);
}
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
snb_fbc_blit_update(dev);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
/**
* intel_fbc_enabled - Is FBC enabled?
* @dev: the drm_device
*
* This function is used to verify the current state of FBC.
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->fbc.enabled;
}
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN8(dev))
return;
if (!intel_fbc_enabled(dev))
return;
I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
kfree(work);
}
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc.fbc_work == NULL)
return;
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc.fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
dev_priv->fbc.fbc_work = NULL;
}
static void intel_fbc_enable(struct drm_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->display.enable_fbc)
return;
intel_fbc_cancel_work(dev_priv);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc);
return;
}
work->crtc = crtc;
work->fb = crtc->primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
/* Delay the actual enabling to let pageflipping cease and the
* display settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
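/*
 * Editorial sketch, not part of the patch: the deferred-enable pattern
 * used by intel_fbc_enable() above, reduced to its core.  The "demo_"
 * names are hypothetical; the workqueue calls are the kernel APIs the
 * code above relies on.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_fbc {
	struct delayed_work work;
};

static void demo_fbc_work_fn(struct work_struct *__work)
{
	struct demo_fbc *demo =
		container_of(to_delayed_work(__work), struct demo_fbc, work);

	/* By the time this runs, at least one vblank has passed since
	 * FBC was disabled, so the control registers may be touched. */
	(void)demo;
}

static void demo_fbc_arm(struct demo_fbc *demo)
{
	INIT_DELAYED_WORK(&demo->work, demo_fbc_work_fn);

	/* give page flips ~50ms (and hence a vblank) to settle first */
	schedule_delayed_work(&demo->work, msecs_to_jiffies(50));
}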
/**
* intel_fbc_disable - disable FBC
* @dev: the drm_device
*
* This function disables FBC.
*/
void intel_fbc_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_fbc_cancel_work(dev_priv);
if (!dev_priv->display.disable_fbc)
return;
dev_priv->display.disable_fbc(dev);
dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return false;
dev_priv->fbc.no_fbc_reason = reason;
return true;
}
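/*
 * Editorial note, not part of the patch: set_no_fbc_reason() returns
 * true only when the stored reason actually changes, which is what lets
 * each caller in intel_fbc_update() below print its "disabling" debug
 * message once instead of on every update cycle.
 */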
/**
* intel_fbc_update - enable/disable FBC as needed
* @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
void intel_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
if (!HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
if (!i915.powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
}
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
for_each_crtc(dev, tmp_crtc) {
if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
goto out_disable;
}
crtc = tmp_crtc;
}
}
if (!crtc || crtc->primary->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
}
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
goto out_disable;
}
if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
max_width = 4096;
max_height = 4096;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
max_width = 2048;
max_height = 1536;
}
if (intel_crtc->config.pipe_src_w > max_width ||
intel_crtc->config.pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
goto out_disable;
}
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
goto out_disable;
}
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
goto out_disable;
}
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
goto out_disable;
if (i915_gem_stolen_setup_compression(dev, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
}
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.plane == intel_crtc->plane &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
if (intel_fbc_enabled(dev)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
intel_fbc_disable(dev);
}
intel_fbc_enable(crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_fbc_disable(dev);
}
i915_gem_stolen_cleanup_compression(dev);
}
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
*
* This function might be called during the PM init process.
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
return;
}
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
dev_priv->display.disable_fbc = ilk_fbc_disable;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = ilk_fbc_enable;
dev_priv->display.disable_fbc = ilk_fbc_disable;
} else if (IS_GM45(dev_priv)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_fbc_enable;
dev_priv->display.disable_fbc = g4x_fbc_disable;
} else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_fbc_enable;
dev_priv->display.disable_fbc = i8xx_fbc_disable;
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}


@ -212,8 +212,7 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
* support for Logical Ring Contexts and Aliasing PPGTT or better),
* and only when enabled via module parameter.
* support for Logical Ring Contexts and Aliasing PPGTT or better).
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
@ -474,13 +473,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
}
/**
* intel_execlists_handle_ctx_events() - handle Context Switch interrupts
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
void intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
@ -876,18 +875,17 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
}
}
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
static int logical_ring_alloc_request(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_gem_request *request;
struct drm_i915_private *dev_private = ring->dev->dev_private;
int ret;
if (ring->outstanding_lazy_seqno)
if (ring->outstanding_lazy_request)
return 0;
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
request = kmalloc(sizeof(*request), GFP_KERNEL);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
@ -899,6 +897,17 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
}
}
kref_init(&request->ref);
request->ring = ring;
request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
if (ret) {
intel_lr_context_unpin(ring, ctx);
kfree(request);
return ret;
}
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
@ -906,10 +915,8 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
ring->preallocated_lazy_request = request;
}
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
ring->outstanding_lazy_request = request;
return 0;
}
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
@ -917,39 +924,38 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= bytes)
if (intel_ring_space(ringbuf) >= bytes)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
/*
* The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering.
*/
struct intel_context *ctx = request->ctx;
if (ctx->engine[ring->id].ringbuf != ringbuf)
continue;
/* Would completion of this request free enough space? */
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
seqno = request->seqno;
break;
}
}
if (seqno == 0)
if (&request->list == &ring->request_list)
return -ENOSPC;
ret = i915_wait_seqno(ring, seqno);
ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
ringbuf->space = intel_ring_space(ringbuf);
return 0;
return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
}
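/*
 * Editorial sketch, not part of the patch: the circular-buffer free
 * space computation behind the intel_ring_space()/__intel_ring_space()
 * calls above.  The reserve size used here is an assumption of this
 * sketch, as is the helper name.
 */
static int demo_ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;	/* tail has wrapped past head */

	/* keep a small reserve so the tail can never catch the head */
	return space - 64;
}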
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
@ -975,13 +981,10 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= bytes) {
ret = 0;
do {
if (intel_ring_space(ringbuf) >= bytes)
break;
}
msleep(1);
@ -1022,7 +1025,7 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
ringbuf->space = intel_ring_space(ringbuf);
intel_ring_update_space(ringbuf);
return 0;
}
@ -1076,7 +1079,7 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
return ret;
/* Preallocate the olr before touching the ring */
ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
if (ret)
return ret;
@ -1093,7 +1096,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (WARN_ON(w->count == 0))
if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@ -1159,10 +1162,6 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
*/
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return init_workarounds_ring(ring);
@ -1321,7 +1320,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
if (ret)
return ret;
cmd = MI_STORE_DWORD_IMM_GEN8;
cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
intel_logical_ring_emit(ringbuf, cmd);
@ -1329,7 +1328,8 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
intel_logical_ring_emit(ringbuf,
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf);
@ -1337,6 +1337,18 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
return 0;
}
static int gen8_init_rcs_context(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret;
ret = intel_logical_ring_workarounds_emit(ring, ctx);
if (ret)
return ret;
return intel_lr_context_render_state_init(ring, ctx);
}
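/*
 * Editorial note, not part of the patch: gen8_init_rcs_context() chains
 * the two one-shot, per-context steps for the render ring (emitting the
 * workaround list, then the golden render state), so the single
 * ->init_context() hook wired up below covers both.
 */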
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
@ -1354,8 +1366,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@ -1389,12 +1400,6 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
if (ret)
return ret;
if (ring->init) {
ret = ring->init(ring);
if (ret)
return ret;
}
ret = intel_lr_context_deferred_create(ring->default_context, ring);
return ret;
@ -1404,6 +1409,7 @@ static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
ring->name = "render ring";
ring->id = RCS;
@ -1415,8 +1421,8 @@ static int logical_render_ring_init(struct drm_device *dev)
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
ring->init = gen8_init_render_ring;
ring->init_context = intel_logical_ring_workarounds_emit;
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@ -1426,7 +1432,12 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
return logical_ring_init(dev, ring);
ring->dev = dev;
ret = logical_ring_init(dev, ring);
if (ret)
return ret;
return intel_init_pipe_control(ring);
}
static int logical_bsd_ring_init(struct drm_device *dev)
@ -1442,7 +1453,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@ -1467,7 +1478,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@ -1492,7 +1503,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@ -1517,7 +1528,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
@ -1616,7 +1627,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
ret = __i915_add_request(ring, file, so.obj, NULL);
ret = __i915_add_request(ring, file, so.obj);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
@ -1835,8 +1846,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
if (ctx->engine[ring->id].state)
return 0;
WARN_ON(ctx->engine[ring->id].state);
context_size = round_up(get_lr_context_size(ring), 4096);
@ -1872,8 +1882,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
intel_ring_update_space(ringbuf);
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
@ -1907,21 +1917,17 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
if (ring->id == RCS && !ctx->rcs_initialized) {
else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
DRM_ERROR("ring init context: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
goto error;
}
}
ctx->rcs_initialized = true;
}


@ -112,7 +112,7 @@ struct intel_ctx_submit_request {
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */


@ -182,7 +182,7 @@ struct intel_overlay {
u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
/* flip handling */
uint32_t last_flip_req;
struct drm_i915_gem_request *last_flip_req;
void (*flip_tail)(struct intel_overlay *);
};
@ -217,17 +217,19 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
ret = i915_add_request(ring, &overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req,
ring->outstanding_lazy_request);
ret = i915_add_request(ring);
if (ret)
return ret;
overlay->flip_tail = tail;
ret = i915_wait_seqno(ring, overlay->last_flip_req);
ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
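/*
 * Editorial sketch, not part of the patch: the reference-swapping
 * semantics of the i915_gem_request_assign() calls used above, with
 * hypothetical "demo_" stand-ins for the kref-backed request helpers.
 */
struct demo_request {
	int refcount;
};

static void demo_request_get(struct demo_request *req)
{
	req->refcount++;		/* i915_gem_request_reference() */
}

static void demo_request_put(struct demo_request *req)
{
	if (--req->refcount == 0)	/* i915_gem_request_unreference() */
		/* last reference dropped: request would be freed here */;
}

static void demo_request_assign(struct demo_request **pdst,
				struct demo_request *src)
{
	if (src)
		demo_request_get(src);
	if (*pdst)
		demo_request_put(*pdst);
	*pdst = src;
}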
@ -286,7 +288,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
return i915_add_request(ring, &overlay->last_flip_req);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req,
ring->outstanding_lazy_request);
return i915_add_request(ring);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@ -361,23 +366,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
if (overlay->last_flip_req == NULL)
return 0;
ret = i915_wait_seqno(ring, overlay->last_flip_req);
ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
overlay->last_flip_req = 0;
i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
}
@ -392,6 +394,8 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
@ -422,6 +426,22 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return 0;
}
void intel_overlay_reset(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay = dev_priv->overlay;
if (!overlay)
return;
intel_overlay_release_old_vid(overlay);
overlay->last_flip_req = NULL;
overlay->old_xscale = 0;
overlay->old_yscale = 0;
overlay->crtc = NULL;
overlay->active = false;
}
struct put_image_params {
int format;
short dst_x;


@ -52,17 +52,6 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reducing power consumption.
*
* The benefits of FBC are mostly visible with solid backgrounds and
* variation-less patterns.
*
* FBC-related functionality can be enabled by means of the
* i915.enable_fbc module parameter
*/
static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -87,613 +76,6 @@ static void gen9_init_clock_gating(struct drm_device *dev)
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
dev_priv->fbc.enabled = false;
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
u32 fbc_ctl;
dev_priv->fbc.enabled = true;
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
if (IS_GEN4(dev)) {
u32 fbc_ctl2;
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
/* Blitter is part of Media powerwell on VLV. No impact of
* this param on other platforms for now */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
dev_priv->fbc.enabled = false;
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dev_priv->fbc.enabled = true;
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
HSW_FBCQ_DIS);
}
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->fbc.enabled;
}
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN8(dev))
return;
if (!intel_fbc_enabled(dev))
return;
I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc.fbc_work == NULL)
return;
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc.fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->display.enable_fbc)
return;
intel_cancel_fbc_work(dev_priv);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc);
return;
}
work->crtc = crtc;
work->fb = crtc->primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
dev_priv->fbc.fbc_work = work;
/* Delay the actual enabling to let pageflipping cease and the
* display settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_cancel_fbc_work(dev_priv);
if (!dev_priv->display.disable_fbc)
return;
dev_priv->display.disable_fbc(dev);
dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return false;
dev_priv->fbc.no_fbc_reason = reason;
return true;
}
/**
* intel_update_fbc - enable/disable FBC as needed
* @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
void intel_update_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
if (!HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
if (!i915.powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
}
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
for_each_crtc(dev, tmp_crtc) {
if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
goto out_disable;
}
crtc = tmp_crtc;
}
}
if (!crtc || crtc->primary->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
}
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
goto out_disable;
}
if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
max_width = 4096;
max_height = 4096;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
max_width = 2048;
max_height = 1536;
}
if (intel_crtc->config.pipe_src_w > max_width ||
intel_crtc->config.pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
goto out_disable;
}
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
goto out_disable;
}
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
goto out_disable;
}
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
goto out_disable;
if (i915_gem_stolen_setup_compression(dev, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
}
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.plane == intel_crtc->plane &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
if (intel_fbc_enabled(dev)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
intel_disable_fbc(dev);
}
intel_enable_fbc(crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
@ -3286,7 +2668,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
if (intel_plane->pipe == pipe &&
plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
@ -3621,10 +3004,9 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
skl_wm_flush_pipe(dev_priv, pipe, 2);
intel_wait_for_vblank(dev, pipe);
}
reallocated[pipe] = true;
}
}
/*
* Third pass: flush the pipes that got more space allocated.
@ -5307,7 +4689,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@ -5321,7 +4704,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@ -5681,146 +5064,27 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
static int _pxvid_to_vd(u8 pxvid)
{
if (pxvid == 0)
return 0;
if (pxvid >= 8 && pxvid < 31)
pxvid = 31;
return (pxvid + 2) * 125;
}
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
struct drm_device *dev = dev_priv->dev;
static const struct v_table {
u16 vd; /* in .1 mil */
u16 vm; /* in .1 mil */
} v_table[] = {
{ 0, 0, },
{ 375, 0, },
{ 500, 0, },
{ 625, 0, },
{ 750, 0, },
{ 875, 0, },
{ 1000, 0, },
{ 1125, 0, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4250, 3125, },
{ 4375, 3250, },
{ 4500, 3375, },
{ 4625, 3500, },
{ 4750, 3625, },
{ 4875, 3750, },
{ 5000, 3875, },
{ 5125, 4000, },
{ 5250, 4125, },
{ 5375, 4250, },
{ 5500, 4375, },
{ 5625, 4500, },
{ 5750, 4625, },
{ 5875, 4750, },
{ 6000, 4875, },
{ 6125, 5000, },
{ 6250, 5125, },
{ 6375, 5250, },
{ 6500, 5375, },
{ 6625, 5500, },
{ 6750, 5625, },
{ 6875, 5750, },
{ 7000, 5875, },
{ 7125, 6000, },
{ 7250, 6125, },
{ 7375, 6250, },
{ 7500, 6375, },
{ 7625, 6500, },
{ 7750, 6625, },
{ 7875, 6750, },
{ 8000, 6875, },
{ 8125, 7000, },
{ 8250, 7125, },
{ 8375, 7250, },
{ 8500, 7375, },
{ 8625, 7500, },
{ 8750, 7625, },
{ 8875, 7750, },
{ 9000, 7875, },
{ 9125, 8000, },
{ 9250, 8125, },
{ 9375, 8250, },
{ 9500, 8375, },
{ 9625, 8500, },
{ 9750, 8625, },
{ 9875, 8750, },
{ 10000, 8875, },
{ 10125, 9000, },
{ 10250, 9125, },
{ 10375, 9250, },
{ 10500, 9375, },
{ 10625, 9500, },
{ 10750, 9625, },
{ 10875, 9750, },
{ 11000, 9875, },
{ 11125, 10000, },
{ 11250, 10125, },
{ 11375, 10250, },
{ 11500, 10375, },
{ 11625, 10500, },
{ 11750, 10625, },
{ 11875, 10750, },
{ 12000, 10875, },
{ 12125, 11000, },
{ 12250, 11125, },
{ 12375, 11250, },
{ 12500, 11375, },
{ 12625, 11500, },
{ 12750, 11625, },
{ 12875, 11750, },
{ 13000, 11875, },
{ 13125, 12000, },
{ 13250, 12125, },
{ 13375, 12250, },
{ 13500, 12375, },
{ 13625, 12500, },
{ 13750, 12625, },
{ 13875, 12750, },
{ 14000, 12875, },
{ 14125, 13000, },
{ 14250, 13125, },
{ 14375, 13250, },
{ 14500, 13375, },
{ 14625, 13500, },
{ 14750, 13625, },
{ 14875, 13750, },
{ 15000, 13875, },
{ 15125, 14000, },
{ 15250, 14125, },
{ 15375, 14250, },
{ 15500, 14375, },
{ 15625, 14500, },
{ 15750, 14625, },
{ 15875, 14750, },
{ 16000, 14875, },
{ 16125, 15000, },
};
const int vd = _pxvid_to_vd(pxvid);
const int vm = vd - 1125;
if (INTEL_INFO(dev)->is_mobile)
return v_table[pxvid].vm;
else
return v_table[pxvid].vd;
return vm > 0 ? vm : 0;
return vd;
}
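/*
 * Editorial note, not part of the patch: spot-checking the closed form
 * above against the deleted table:
 *   pxvid  1: vd = (1 + 2) * 125 =  375, vm = max(375 - 1125, 0) = 0,
 *             matching the old entry {  375,    0 }
 *   pxvid 20: clamped to 31, so vd = 4125, vm = 3000 -> { 4125, 3000 }
 *   pxvid 32: vd = (32 + 2) * 125 = 4250, vm = 3125  -> { 4250, 3125 }
 * Mobile parts still return vm and everything else vd, as before.
 */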
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
@ -7051,43 +6315,12 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
return;
}
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = gen7_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev_priv)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
} else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_init_fbc(dev_priv);
intel_fbc_init(dev_priv);
/* For cxsr */
if (IS_PINEVIEW(dev))


@ -61,14 +61,15 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
bool intel_psr_is_enabled(struct drm_device *dev)
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
if (!HAS_PSR(dev))
return false;
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
val = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
@ -100,7 +101,23 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t val;
/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
val = I915_READ(VLV_VSCSDP(pipe));
val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
I915_WRITE(VLV_VSCSDP(pipe), val);
}
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
@ -113,14 +130,20 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE);
}
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
bool only_standby = false;
bool only_standby = dev_priv->vbt.psr.full_link;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@ -157,13 +180,50 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
static void intel_psr_enable_source(struct intel_dp *intel_dp)
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
I915_WRITE(VLV_PSRCTL(pipe),
VLV_EDP_PSR_MODE_SW_TIMER |
VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
VLV_EDP_PSR_ENABLE);
}
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* Let's do the transition from PSR_state 1 to PSR_state 2
* that is PSR transition to active - static frame transmission.
* Then Hardware is responsible for the transition to PSR_state 3
* that is PSR active - no Remote Frame Buffer (RFB) update.
*/
I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
VLV_EDP_PSR_ACTIVE_ENTRY);
}
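/*
 * Editorial summary, not part of the patch: the VLV PSR state numbers
 * referenced by the comments in this file, gathered in one place.  The
 * enum and identifier names are hypothetical; the numbering comes from
 * the comments themselves.
 */
enum demo_vlv_psr_state {
	DEMO_PSR_DISABLED	= 0,	/* PSR off */
	DEMO_PSR_INACTIVE	= 1,	/* enabled, waiting to activate */
	DEMO_PSR_TRANSITION	= 2,	/* entering active, static frame tx */
	DEMO_PSR_ACTIVE_NORFB	= 3,	/* active, no RFB updates */
	DEMO_PSR_ACTIVE_SF	= 4,	/* active with single frame update */
	DEMO_PSR_EXIT		= 5,	/* leaving PSR, HW returns to 1 */
};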
static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
/* It was recently identified that, depending on the panel, the idle
* frame count calculated in HW can be off by 1. So let's use what
* comes from VBT + 1, and at minimum 2, to be on the safe side.
*/
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
dev_priv->vbt.psr.idle_frames + 1 : 2;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
@ -176,7 +236,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@ -231,7 +290,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return true;
}
static void intel_psr_do_enable(struct intel_dp *intel_dp)
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@ -242,7 +301,14 @@ static void intel_psr_do_enable(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
intel_psr_enable_source(intel_dp);
if (HAS_DDI(dev))
/* On HSW+, after we enable PSR on the source it will activate
* as soon as the configured idle_frame count is matched. So
* we actually enable it here, at activation time.
*/
hsw_psr_enable_source(intel_dp);
else
vlv_psr_activate(intel_dp);
dev_priv->psr.active = true;
}
@ -280,20 +346,83 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
intel_psr_setup_vsc(intel_dp);
if (HAS_DDI(dev)) {
hsw_psr_setup_vsc(intel_dp);
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
/* Enable PSR on the panel */
intel_psr_enable_sink(intel_dp);
hsw_psr_enable_sink(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
/* Enable PSR on the panel */
vlv_psr_enable_sink(intel_dp);
/* On HSW+, enable_source also means entering the PSR entry/active
* state as soon as the idle_frame count is achieved, and here would
* be too soon. On VLV, however, enable_source just enables PSR and
* leaves it in the inactive state. So we can do this prior to the
* active transition, i.e. here.
*/
vlv_psr_enable_source(intel_dp);
}
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
static void vlv_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
VLV_EDP_PSR_IN_TRANS) == 0, 1))
WARN(1, "PSR transition took longer than expected\n");
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
val &= ~VLV_EDP_PSR_ENABLE;
val &= ~VLV_EDP_PSR_MODE_MASK;
I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
dev_priv->psr.active = false;
} else {
WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
}
}
static void hsw_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
}
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
@ -312,19 +441,10 @@ void intel_psr_disable(struct intel_dp *intel_dp)
return;
}
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
if (HAS_DDI(dev))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
@ -337,18 +457,27 @@ static void intel_psr_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* We have to make sure PSR is ready for re-enable
* otherwise it stays disabled until the next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv->dev)) {
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
} else {
if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
}
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
@ -363,7 +492,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
intel_psr_do_enable(intel_dp);
intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
@ -371,17 +500,47 @@ unlock:
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
if (!dev_priv->psr.active)
return;
if (HAS_DDI(dev)) {
val = I915_READ(EDP_PSR_CTL(dev));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
dev_priv->psr.active = false;
} else {
val = I915_READ(VLV_PSRCTL(pipe));
/* Here we transition directly from PSR_state 3 (active) to
 * PSR_state 5 (exit), since PSR_state 4 (active with single
 * frame update) can be skipped. From PSR_state 5 the hardware
 * is responsible for transitioning back to PSR_state 1, which
 * is PSR inactive - the same state as after
 * vlv_edp_psr_enable_source.
 */
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
I915_WRITE(VLV_PSRCTL(pipe), val);
/* Send AUX wake up - the spec says that after transitioning to
 * PSR active we have to send an AUX wake up by writing 01h to
 * DPCD 600h of the sink device.
 * XXX: This might slow down the transition, but without it the
 * HW doesn't complete the transition to PSR_state 1 and we
 * never get the screen updated.
*/
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D0);
}
dev_priv->psr.active = false;
}
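
For reference, the DPCD write above uses these constants from drm_dp_helper.h; the values below are the DisplayPort-spec register and power state the comment spells out as "01h in 600h":

#define DP_SET_POWER            0x600	/* sink power-state register */
# define DP_SET_POWER_D0        0x1	/* sink fully on */
# define DP_SET_POWER_D3        0x2	/* sink in low-power state */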
/**
@ -459,6 +618,17 @@ void intel_psr_flush(struct drm_device *dev,
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);
/*
 * On Valleyview and Cherryview we don't use hardware tracking, so
 * sprite plane updates or cursor moves don't result in a PSR
 * invalidate. This means we need to fake the invalidate manually in
 * software for all flushes, not just when we've seen a preceding
 * invalidation through frontbuffer rendering.
 */
if (!HAS_DDI(dev) &&
((frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)) ||
(frontbuffer_bits & INTEL_FRONTBUFFER_CURSOR(pipe))))
intel_psr_exit(dev);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
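
The frontbuffer_bits tested here pack one bit per plane per pipe, so a single mask can name "the sprite on pipe A" and friends. A sketch of the encoding, matching our reading of the INTEL_FRONTBUFFER_* macros in i915_drv.h:

#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
/* e.g. a sprite flush on pipe B (pipe == 1) tests bit 6 */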


@ -1,3 +1,28 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen6_null_state_relocs[] = {


@ -1,3 +1,28 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen7_null_state_relocs[] = {


@ -1,3 +1,28 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {


@ -1,3 +1,28 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen9_null_state_relocs[] = {


@ -52,16 +52,27 @@ intel_ring_initialized(struct intel_engine_cs *ring)
int __intel_ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
int space = head - tail;
if (space <= 0)
space += size;
return space;
return space - I915_RING_FREE_SPACE;
}
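
The rewrite defers the I915_RING_FREE_SPACE reservation to the end of the calculation, which is why the wrap test becomes "<= 0": an empty ring (head == tail) must wrap to size before the reserve is subtracted. A worked example, assuming the reserve is 64 bytes:

/* size = 4096, head = 512, tail = 3584:
 *   space = 512 - 3584 = -3072      (<= 0, so wrap)
 *   space = -3072 + 4096 = 1024
 *   return 1024 - 64 = 960          bytes usable before tail hits head
 * For head == tail: space = 0 wraps to 4096, returning 4096 - 64,
 * the same size-minus-reserve answer the old formula gave via space < 0.
 */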
void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
}
ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
ringbuf->tail, ringbuf->size);
}
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
return __intel_ring_space(ringbuf->head & HEAD_ADDR,
ringbuf->tail, ringbuf->size);
intel_ring_update_space(ringbuf);
return ringbuf->space;
}
bool intel_ring_stopped(struct intel_engine_cs *ring)
@ -592,10 +603,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
ringbuf->last_retired_head = -1;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
intel_ring_update_space(ringbuf);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@ -627,8 +638,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
if (ring->scratch.obj)
return 0;
WARN_ON(ring->scratch.obj);
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
@ -672,7 +682,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (WARN_ON(w->count == 0))
if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@ -703,6 +713,22 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
}
static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret;
ret = intel_ring_workarounds_emit(ring, ctx);
if (ret != 0)
return ret;
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
return ret;
}
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
@ -762,9 +788,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:bdw */
/* WaHdcDisableFetchWhenMasked:bdw */
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
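
WA_SET_BIT_MASKED() targets "masked" registers, where the upper 16 bits of the written value select which of the lower 16 bits actually change. A sketch of the underlying helpers as we understand them from i915_reg.h:

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* unmask bit, set it */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* unmask bit, clear it */
/* e.g. _MASKED_BIT_ENABLE(0x20) == 0x00200020: only bit 5 is touched */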
@ -861,12 +890,6 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@ -918,17 +941,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
seqno = i915_gem_request_get_seqno(
signaller->outstanding_lazy_request);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
@ -956,16 +982,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
seqno = i915_gem_request_get_seqno(
signaller->outstanding_lazy_request);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
@ -994,9 +1023,11 @@ static int gen6_signal(struct intel_engine_cs *signaller,
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
u32 seqno = i915_gem_request_get_seqno(
signaller->outstanding_lazy_request);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, seqno);
}
}
@ -1031,7 +1062,8 @@ gen6_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring,
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@ -1149,7 +1181,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring,
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@ -1168,7 +1201,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring,
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@ -1408,7 +1442,8 @@ i9xx_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring,
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@ -1789,15 +1824,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct intel_ringbuffer *ringbuf;
int ret;
if (ringbuf == NULL) {
WARN_ON(ring->buffer);
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf)
return -ENOMEM;
ring->buffer = ringbuf;
}
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
@ -1820,7 +1855,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
if (ringbuf->obj == NULL) {
WARN_ON(ringbuf->obj);
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
@ -1835,7 +1871,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
}
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
@ -1849,10 +1884,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
ret = ring->init(ring);
if (ret)
goto error;
return 0;
error:
@ -1877,8 +1908,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
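
i915_gem_request_assign() is the refcount-aware pointer update that replaces zeroing the lazy seqno by hand. A sketch of its likely shape (the helper is introduced in i915_drv.h by this series; reference/unreference wrap kref get/put):

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
			struct drm_i915_gem_request *src)
{
	/* grab the new reference before dropping the old one, so
	 * assigning a pointer to itself stays safe */
	if (src)
		i915_gem_request_reference(src);
	if (*pdst)
		i915_gem_request_unreference(*pdst);
	*pdst = src;
}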
@ -1895,38 +1925,27 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n)
if (intel_ring_space(ringbuf) >= n)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
seqno = request->seqno;
break;
}
}
if (seqno == 0)
if (&request->list == &ring->request_list)
return -ENOSPC;
ret = i915_wait_seqno(ring, seqno);
ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
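
The -ENOSPC test works because list_for_each_entry() leaves its cursor pointing at the head sentinel when the loop finishes without a break, so no "found" flag or seqno sentinel is needed. The bare pattern, with req_frees_enough() as a hypothetical predicate:

struct drm_i915_gem_request *request;

list_for_each_entry(request, &ring->request_list, list)
	if (req_frees_enough(request, n))	/* hypothetical */
		break;

if (&request->list == &ring->request_list)	/* no break taken */
	return -ENOSPC;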
@ -1952,14 +1971,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
ret = 0;
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
if (intel_ring_space(ringbuf) >= n)
break;
ringbuf->head = I915_READ_HEAD(ring);
if (intel_ring_space(ringbuf) >= n)
break;
}
msleep(1);
@ -2000,19 +2019,19 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
ringbuf->space = intel_ring_space(ringbuf);
intel_ring_update_space(ringbuf);
return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
u32 seqno;
struct drm_i915_gem_request *req;
int ret;
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_seqno) {
ret = i915_add_request(ring, NULL);
if (ring->outstanding_lazy_request) {
ret = i915_add_request(ring);
if (ret)
return ret;
}
@ -2021,30 +2040,39 @@ int intel_ring_idle(struct intel_engine_cs *ring)
if (list_empty(&ring->request_list))
return 0;
seqno = list_entry(ring->request_list.prev,
req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
list)->seqno;
list);
return i915_wait_seqno(ring, seqno);
return i915_wait_request(req);
}
static int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
intel_ring_alloc_request(struct intel_engine_cs *ring)
{
if (ring->outstanding_lazy_seqno)
int ret;
struct drm_i915_gem_request *request;
struct drm_i915_private *dev_private = ring->dev->dev_private;
if (ring->outstanding_lazy_request)
return 0;
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
request = kmalloc(sizeof(*request), GFP_KERNEL);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
ring->preallocated_lazy_request = request;
kref_init(&request->ref);
request->ring = ring;
request->uniq = dev_private->request_uniq++;
ret = i915_gem_get_seqno(ring->dev, &request->seqno);
if (ret) {
kfree(request);
return ret;
}
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
ring->outstanding_lazy_request = request;
return 0;
}
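
Requests are now individually refcounted instead of being tracked by a bare seqno. A sketch of the lifetime rules kref_init() sets up above (request_release() is a hypothetical stand-in for the series' real free routine):

static void request_release(struct kref *ref)
{
	struct drm_i915_gem_request *req =
		container_of(ref, struct drm_i915_gem_request, ref);

	kfree(req);	/* the real code also drops seqno/ctx bookkeeping */
}

/* kref_init() starts the count at 1 (the ring's own reference);
 * every other holder pairs kref_get(&req->ref) with
 * kref_put(&req->ref, request_release). */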
static int __intel_ring_prepare(struct intel_engine_cs *ring,
@ -2084,7 +2112,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
return ret;
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
ret = intel_ring_alloc_request(ring);
if (ret)
return ret;
@ -2119,7 +2147,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
BUG_ON(ring->outstanding_lazy_seqno);
BUG_ON(ring->outstanding_lazy_request);
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@ -2341,7 +2369,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
}
ring->init_context = intel_ring_workarounds_emit;
ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@ -2426,7 +2454,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->init_hw = init_render_ring;
ring->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
@ -2448,7 +2476,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
return intel_init_ring_buffer(dev, ring);
ret = intel_init_ring_buffer(dev, ring);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen >= 5) {
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
@ -2519,7 +2557,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@ -2558,7 +2596,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@ -2615,7 +2653,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@ -2666,7 +2704,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}


@ -142,11 +142,11 @@ struct intel_engine_cs {
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
struct drm_i915_gem_request *trace_irq_req;
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
int (*init)(struct intel_engine_cs *ring);
int (*init_hw)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
@ -251,7 +251,7 @@ struct intel_engine_cs {
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
@ -267,8 +267,7 @@ struct intel_engine_cs {
/**
* Do we have some not yet emitted requests outstanding?
*/
struct drm_i915_gem_request *preallocated_lazy_request;
u32 outstanding_lazy_seqno;
struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
@ -408,6 +407,7 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
@ -436,16 +436,11 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
return ringbuf->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
BUG_ON(ring->outstanding_lazy_request == NULL);
return ring->outstanding_lazy_request;
}
#endif /* _INTEL_RINGBUFFER_H_ */


@ -118,7 +118,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
/**
* intel_display_power_is_enabled - unlocked check for a power domain
* intel_display_power_is_enabled - check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
@ -633,7 +633,7 @@ static void check_power_well_state(struct drm_i915_private *dev_priv,
return;
mismatch:
WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
I915_STATE_WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}


@ -1004,7 +1004,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
intel_fbc_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@ -1017,7 +1017,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
intel_disable_fbc(dev);
intel_fbc_disable(dev);
mutex_unlock(&dev->struct_mutex);
/*
@ -1096,9 +1096,9 @@ static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
@ -1109,7 +1109,12 @@ intel_check_sprite_plane(struct drm_plane *plane,
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
int pixel_size;
if (!fb) {
state->visible = false;
return 0;
}
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
@ -1232,6 +1237,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (src_w < 3 || src_h < 3)
state->visible = false;
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
@ -1257,53 +1263,17 @@ intel_check_sprite_plane(struct drm_plane *plane,
return 0;
}
static int
intel_prepare_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
/* Note that this will apply the VT-d workaround for scanouts,
* which is more restrictive than required for sprites. (The
* primary plane requires 256KiB alignment with 64 PTE padding,
* the sprite planes only require 128KiB alignment and 32 PTE
* padding.)
*/
ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
}
return 0;
}
static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
@ -1311,6 +1281,17 @@ intel_commit_sprite_plane(struct drm_plane *plane,
const struct drm_rect *clip = &state->clip;
bool primary_enabled;
/*
* 'prepare' is never called when the plane is being disabled, so we
* need to handle frontbuffer tracking here
*/
if (!fb) {
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
}
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
@ -1361,112 +1342,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj && old_obj != obj) {
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
}
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct intel_plane_state state;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
state.crtc = crtc;
state.fb = fb;
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
state.orig_src = state.src;
state.orig_dst = state.dst;
ret = intel_check_sprite_plane(plane, &state);
if (ret)
return ret;
ret = intel_prepare_sprite_plane(plane, &state);
if (ret)
return ret;
intel_commit_sprite_plane(plane, &state);
return 0;
}
static int
intel_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
enum pipe pipe;
if (!plane->fb)
return 0;
if (WARN_ON(!plane->crtc))
return -EINVAL;
intel_crtc = to_intel_crtc(plane->crtc);
pipe = intel_crtc->pipe;
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
intel_crtc->primary_enabled = true;
intel_plane->disable_plane(plane, plane->crtc);
if (!primary_was_enabled && intel_crtc->primary_enabled)
intel_post_enable_primary(plane->crtc);
}
if (intel_plane->obj) {
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_plane->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
i915_gem_track_fb(intel_plane->obj, NULL,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
intel_plane->obj = NULL;
}
return 0;
}
static void intel_destroy_plane(struct drm_plane *plane)
@ -1576,14 +1451,6 @@ int intel_plane_restore(struct drm_plane *plane)
intel_plane->src_w, intel_plane->src_h);
}
void intel_plane_disable(struct drm_plane *plane)
{
if (!plane->crtc || !plane->fb)
return;
intel_disable_plane(plane);
}
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
@ -1720,6 +1587,8 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->rotation = BIT(DRM_ROTATE_0);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,


@ -647,9 +647,9 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5000, 0x8000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
REG_RANGE((reg), 0xB000, 0xC000) || \
REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE800))
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
@ -658,17 +658,14 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1C000) || \
REG_RANGE((reg), 0x1E800, 0x1EA00) || \
REG_RANGE((reg), 0x30000, 0x40000))
REG_RANGE((reg), 0x30000, 0x38000))
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x4000, 0x5000) || \
REG_RANGE((reg), 0x8000, 0x8300) || \
REG_RANGE((reg), 0x8500, 0x8600) || \
REG_RANGE((reg), 0x9000, 0xB000) || \
REG_RANGE((reg), 0xC000, 0xC800) || \
REG_RANGE((reg), 0xF000, 0x10000) || \
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
REG_RANGE((reg), 0xF000, 0x10000))
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
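
All the FORCEWAKE_*_RANGE_OFFSET() checks compose one half-open interval helper, defined earlier in intel_uncore.c as:

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
/* so after this change 0x30000 still hits the CHV media range while
 * 0x38000 does not - the end bound is exclusive */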
@ -1202,7 +1199,7 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
WARN_ON(1);
MISSING_CASE(INTEL_INFO(dev)->gen);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
@ -1300,7 +1297,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
reg->val = I915_READ8(reg->offset);
break;
default:
WARN_ON(1);
MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}


@ -1247,6 +1247,8 @@ extern int drm_plane_init(struct drm_device *dev,
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
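
A hypothetical caller of the newly exported helper, using it to size a plane clip rectangle to the CRTC's effective scanout area for the current mode:

struct drm_rect clip = { 0, 0, 0, 0 };
int hdisplay, vdisplay;

drm_crtc_get_hv_timing(&crtc->mode, &hdisplay, &vdisplay);
clip.x2 = hdisplay;
clip.y2 = vdisplay;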


@ -90,6 +90,9 @@ enum drm_mode_status {
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
#define CRTC_NO_VSCAN (1 << 3) /* don't adjust doublescan */
#define CRTC_STEREO_DOUBLE_ONLY (CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF