Merge tag 'drm-intel-next-2014-04-16' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2014-04-16:
- vlv infoframe fixes from Jesse
- dsi/mipi fixes from Shobhit
- gen8 pageflip fixes for LRI/SRM from Damien
- cmd parser fixes from Brad Volkin
- some prep patches for CHV, DRRS, ...
- and tons of little things all over

drm-intel-next-2014-04-04:
- cmd parser for gen7, but only in enforcing and not yet granting mode;
  the batch copying stuff is still missing. Also, performance is a bit ...
  rough (Brad Volkin + OACONTROL fix from Ken).
- deprecate UMS harder (i.e. CONFIG_BROKEN)
- interrupt rework from Paulo Zanoni
- runtime PM support for bdw and snb, again from Paulo
- a pile of refactorings from various people all over the place to prep
  for new stuff (irq reworks, power domain polish, ...)

Conflicts:
	drivers/gpu/drm/i915/i915_gem_context.c
commit 885ac04ab3
@@ -131,11 +131,11 @@ drm_clflush_sg(struct sg_table *st)
 EXPORT_SYMBOL(drm_clflush_sg);
 
 void
-drm_clflush_virt_range(char *addr, unsigned long length)
+drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
 	if (cpu_has_clflush) {
-		char *end = addr + length;
+		void *end = addr + length;
 		mb();
 		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
 			clflush(addr);
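A minimal usage sketch (not part of the diff; the page/kmap names are
illustrative only): with the void * prototype above, callers no longer
need to cast their pointers before flushing a CPU-mapped range:

	/* flush a kmap'd page out of the CPU caches */
	void *vaddr = kmap(page);
	drm_clflush_virt_range(vaddr, PAGE_SIZE);
	kunmap(page);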
@@ -71,7 +71,7 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
 
 config DRM_I915_UMS
 	bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
-	depends on DRM_I915
+	depends on DRM_I915 && BROKEN
 	default n
 	help
 	  Choose this option if you still need userspace modesetting.
@@ -160,7 +160,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!ch7xxx->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
@@ -195,7 +195,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	if (i2c_transfer(adapter, msgs, 3) == 3) {
 		*data = (in_buf[1] << 8) | in_buf[0];
 		return true;
-	};
+	}
 
 	if (!priv->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
@@ -121,7 +121,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!ns->quiet) {
 		DRM_DEBUG_KMS
@@ -233,9 +233,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
 					      struct drm_display_mode *mode)
 {
 	DRM_DEBUG_KMS
-	    ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
-	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
-	     mode->vtotal);
+	    ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+	     mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
 	/*
 	 * Currently, these are all the modes I have data from.
@@ -261,9 +260,8 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
 	DRM_DEBUG_KMS
-	    ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
-	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
-	     mode->vtotal);
+	    ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+	     mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
 	/*
 	 * Where do I find the native resolution for which scaling is not required???
@@ -277,8 +275,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 	if (mode->hdisplay == 800 && mode->vdisplay == 600) {
 		/* mode 277 */
 		ns->reg_8_shadow &= ~NS2501_8_BPAS;
-		DRM_DEBUG_KMS("%s: switching to 800x600\n",
-			      __FUNCTION__);
+		DRM_DEBUG_KMS("switching to 800x600\n");
 
 		/*
 		 * No, I do not know where this data comes from.
@@ -341,8 +338,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 
 	} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
 		/* mode 274 */
-		DRM_DEBUG_KMS("%s: switching to 640x480\n",
-			      __FUNCTION__);
+		DRM_DEBUG_KMS("switching to 640x480\n");
 		/*
 		 * No, I do not know where this data comes from.
 		 * It is just what the video bios left in the DVO, so
@@ -406,8 +402,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 
 	} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
 		/* mode 280 */
-		DRM_DEBUG_KMS("%s: switching to 1024x768\n",
-			      __FUNCTION__);
+		DRM_DEBUG_KMS("switching to 1024x768\n");
 		/*
 		 * This might or might not work, actually. I'm silently
 		 * assuming here that the native panel resolution is
@@ -458,8 +453,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 	unsigned char ch;
 
-	DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
-		      __FUNCTION__, enable);
+	DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
 	ch = ns->reg_8_shadow;
 
@@ -93,7 +93,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!sil->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
@@ -118,7 +118,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	if (i2c_transfer(adapter, msgs, 2) == 2) {
 		*ch = in_buf[0];
 		return true;
-	};
+	}
 
 	if (!tfp->quiet) {
 		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
@@ -86,6 +86,367 @@
  * general bitmasking mechanism.
  */
 
+#define STD_MI_OPCODE_MASK  0xFF800000
+#define STD_3D_OPCODE_MASK  0xFFFF0000
+#define STD_2D_OPCODE_MASK  0xFFC00000
+#define STD_MFX_OPCODE_MASK 0xFFFF0000
+
+#define CMD(op, opm, f, lm, fl, ...)				\
+	{							\
+		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
+		.cmd = { (op), (opm) },				\
+		.length = { (lm) },				\
+		__VA_ARGS__					\
+	}
+
+/* Convenience macros to compress the tables */
+#define SMI STD_MI_OPCODE_MASK
+#define S3D STD_3D_OPCODE_MASK
+#define S2D STD_2D_OPCODE_MASK
+#define SMFX STD_MFX_OPCODE_MASK
+#define F true
+#define S CMD_DESC_SKIP
+#define R CMD_DESC_REJECT
+#define W CMD_DESC_REGISTER
+#define B CMD_DESC_BITMASK
+#define M CMD_DESC_MASTER
+
+/*      Command                     Mask  Fixed Len  Action
+	---------------------------------------------------------- */
+static const struct drm_i915_cmd_descriptor common_cmds[] = {
+	CMD( MI_NOOP, SMI, F, 1, S ),
+	CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
+	CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+	CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+	CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+	CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+	CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
+	CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
+	CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC },
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC },
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor render_cmds[] = {
+	CMD( MI_FLUSH, SMI, F, 1, S ),
+	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+	CMD( MI_PREDICATE, SMI, F, 1, S ),
+	CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
+	CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+	CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
+	CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
+	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT, SMI, !F, 0xFF, R ),
+	CMD( MI_CLFLUSH, SMI, !F, 0x3FF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_REPORT_PERF_COUNT, SMI, !F, 0x3F, B,
+	     .bits = {{
+			.offset = 1,
+			.mask = MI_REPORT_PERF_COUNT_GGTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D, F, 1, S ),
+	CMD( PIPELINE_SELECT, S3D, F, 1, S ),
+	CMD( MEDIA_VFE_STATE, S3D, !F, 0xFFFF, B,
+	     .bits = {{
+			.offset = 2,
+			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
+			.expected = 0,
+	     }}, ),
+	CMD( GPGPU_OBJECT, S3D, !F, 0xFF, S ),
+	CMD( GPGPU_WALKER, S3D, !F, 0xFF, S ),
+	CMD( GFX_OP_3DSTATE_SO_DECL_LIST, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_PIPE_CONTROL(5), S3D, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 1,
+			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
+				 PIPE_CONTROL_STORE_DATA_INDEX),
+			.expected = 0,
+			.condition_offset = 1,
+			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
+	     }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
+	CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
+	CMD( MI_RS_CONTROL, SMI, F, 1, S ),
+	CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+	CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
+	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+	CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+	CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
+	CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
+	CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
+	CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
+	CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
+
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
+	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
+};
+
+static const struct drm_i915_cmd_descriptor video_cmds[] = {
+	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+	CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     },
+	     {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     }}, ),
+	CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	/*
+	 * MFX_WAIT doesn't fit the way we handle length for most commands.
+	 * It has a length field but it uses a non-standard length bias.
+	 * It is always 1 dword though, so just treat it as fixed length.
+	 */
+	CMD( MFX_WAIT, SMFX, F, 1, S ),
+};
+
+static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+	CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     },
+	     {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     }}, ),
+	CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+};
+
+static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+	CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_GLOBAL_GTT,
+			.expected = 0,
+	     }}, ),
+	CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
+	CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_FLUSH_DW_NOTIFY,
+			.expected = 0,
+	     },
+	     {
+			.offset = 1,
+			.mask = MI_FLUSH_DW_USE_GTT,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     },
+	     {
+			.offset = 0,
+			.mask = MI_FLUSH_DW_STORE_INDEX,
+			.expected = 0,
+			.condition_offset = 0,
+			.condition_mask = MI_FLUSH_DW_OP_MASK,
+	     }}, ),
+	CMD( COLOR_BLT, S2D, !F, 0x3F, S ),
+	CMD( SRC_COPY_BLT, S2D, !F, 0x3F, S ),
+};
+
+static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
+};
+
+#undef CMD
+#undef SMI
+#undef S3D
+#undef S2D
+#undef SMFX
+#undef F
+#undef S
+#undef R
+#undef W
+#undef B
+#undef M
+
+static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ render_cmds, ARRAY_SIZE(render_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ render_cmds, ARRAY_SIZE(render_cmds) },
+	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ video_cmds, ARRAY_SIZE(video_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+};
+
+static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+};
+
+static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+	{ common_cmds, ARRAY_SIZE(common_cmds) },
+	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+};
+
+/*
+ * Register whitelists, sorted by increasing register offset.
+ *
+ * Some registers that userspace accesses are 64 bits. The register
+ * access commands only allow 32-bit accesses. Hence, we have to include
+ * entries for both halves of the 64-bit registers.
+ */
+
+/* Convenience macro for adding 64-bit registers */
+#define REG64(addr) (addr), (addr + sizeof(u32))
+
+static const u32 gen7_render_regs[] = {
+	REG64(HS_INVOCATION_COUNT),
+	REG64(DS_INVOCATION_COUNT),
+	REG64(IA_VERTICES_COUNT),
+	REG64(IA_PRIMITIVES_COUNT),
+	REG64(VS_INVOCATION_COUNT),
+	REG64(GS_INVOCATION_COUNT),
+	REG64(GS_PRIMITIVES_COUNT),
+	REG64(CL_INVOCATION_COUNT),
+	REG64(CL_PRIMITIVES_COUNT),
+	REG64(PS_INVOCATION_COUNT),
+	REG64(PS_DEPTH_COUNT),
+	OACONTROL, /* Only allowed for LRI and SRM. See below. */
+	GEN7_3DPRIM_END_OFFSET,
+	GEN7_3DPRIM_START_VERTEX,
+	GEN7_3DPRIM_VERTEX_COUNT,
+	GEN7_3DPRIM_INSTANCE_COUNT,
+	GEN7_3DPRIM_START_INSTANCE,
+	GEN7_3DPRIM_BASE_VERTEX,
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
+	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
+	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
+	GEN7_SO_WRITE_OFFSET(0),
+	GEN7_SO_WRITE_OFFSET(1),
+	GEN7_SO_WRITE_OFFSET(2),
+	GEN7_SO_WRITE_OFFSET(3),
+};
+
+static const u32 gen7_blt_regs[] = {
+	BCS_SWCTRL,
+};
+
+static const u32 ivb_master_regs[] = {
+	FORCEWAKE_MT,
+	DERRMR,
+	GEN7_PIPE_DE_LOAD_SL(PIPE_A),
+	GEN7_PIPE_DE_LOAD_SL(PIPE_B),
+	GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+};
+
+static const u32 hsw_master_regs[] = {
+	FORCEWAKE_MT,
+	DERRMR,
+};
+
+#undef REG64
+
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
 	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
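A hedged illustration (not part of the diff): with the CMD() macro and
single-letter aliases above, a table entry such as
CMD( MI_NOOP, SMI, F, 1, S ) expands to roughly

	{
		.flags = CMD_DESC_SKIP | CMD_DESC_FIXED, /* fl = S, f = F (true) */
		.cmd = { MI_NOOP, STD_MI_OPCODE_MASK },  /* opcode value and mask */
		.length = { 1 },                         /* fixed length: 1 dword */
	}

so each row reads as "command, opcode mask, fixed?, length (or length
mask), parser action", matching the column banner above the tables.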
@@ -137,12 +498,13 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
-static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+static bool validate_cmds_sorted(struct intel_ring_buffer *ring)
 {
 	int i;
+	bool ret = true;
 
 	if (!ring->cmd_tables || ring->cmd_table_count == 0)
-		return;
+		return true;
 
 	for (i = 0; i < ring->cmd_table_count; i++) {
 		const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
@@ -154,35 +516,45 @@ static void validate_cmds_sorted(struct intel_ring_buffer *ring)
 				&table->table[i];
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
-			if (curr < previous)
+			if (curr < previous) {
 				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
 					  ring->id, i, j, curr, previous);
+				ret = false;
+			}
 
 			previous = curr;
 		}
 	}
+
+	return ret;
 }
 
-static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
 {
 	int i;
 	u32 previous = 0;
+	bool ret = true;
 
 	for (i = 0; i < reg_count; i++) {
 		u32 curr = reg_table[i];
 
-		if (curr < previous)
+		if (curr < previous) {
 			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
 				  ring_id, i, curr, previous);
+			ret = false;
+		}
 
 		previous = curr;
 	}
+
+	return ret;
 }
 
-static void validate_regs_sorted(struct intel_ring_buffer *ring)
+static bool validate_regs_sorted(struct intel_ring_buffer *ring)
 {
-	check_sorted(ring->id, ring->reg_table, ring->reg_count);
-	check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
+	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
+		check_sorted(ring->id, ring->master_reg_table,
+			     ring->master_reg_count);
 }
 
 /**
@@ -200,15 +572,58 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
 
 	switch (ring->id) {
 	case RCS:
+		if (IS_HASWELL(ring->dev)) {
+			ring->cmd_tables = hsw_render_ring_cmds;
+			ring->cmd_table_count =
+				ARRAY_SIZE(hsw_render_ring_cmds);
+		} else {
+			ring->cmd_tables = gen7_render_cmds;
+			ring->cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+		}
+
+		ring->reg_table = gen7_render_regs;
+		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+
+		if (IS_HASWELL(ring->dev)) {
+			ring->master_reg_table = hsw_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		} else {
+			ring->master_reg_table = ivb_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+		}
+
 		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VCS:
+		ring->cmd_tables = gen7_video_cmds;
+		ring->cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
 		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
+		if (IS_HASWELL(ring->dev)) {
+			ring->cmd_tables = hsw_blt_ring_cmds;
+			ring->cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+		} else {
+			ring->cmd_tables = gen7_blt_cmds;
+			ring->cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+		}
+
+		ring->reg_table = gen7_blt_regs;
+		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+
+		if (IS_HASWELL(ring->dev)) {
+			ring->master_reg_table = hsw_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		} else {
+			ring->master_reg_table = ivb_master_regs;
+			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+		}
+
 		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VECS:
+		ring->cmd_tables = hsw_vebox_cmds;
+		ring->cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
 		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
@@ -218,8 +633,8 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
 		BUG();
 	}
 
-	validate_cmds_sorted(ring);
-	validate_regs_sorted(ring);
+	BUG_ON(!validate_cmds_sorted(ring));
+	BUG_ON(!validate_regs_sorted(ring));
 }
 
 static const struct drm_i915_cmd_descriptor*
@@ -331,13 +746,111 @@ finish:
  */
 bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
 	/* No command tables indicates a platform without parsing */
 	if (!ring->cmd_tables)
 		return false;
 
+	/*
+	 * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
+	 * disabled. That will cause all of the parser's PPGTT checks to
+	 * fail. For now, disable parsing when PPGTT is off.
+	 */
+	if (!dev_priv->mm.aliasing_ppgtt)
+		return false;
+
 	return (i915.enable_cmd_parser == 1);
 }
 
+static bool check_cmd(const struct intel_ring_buffer *ring,
+		      const struct drm_i915_cmd_descriptor *desc,
+		      const u32 *cmd,
+		      const bool is_master,
+		      bool *oacontrol_set)
+{
+	if (desc->flags & CMD_DESC_REJECT) {
+		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		return false;
+	}
+
+	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+				 *cmd);
+		return false;
+	}
+
+	if (desc->flags & CMD_DESC_REGISTER) {
+		u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
+
+		/*
+		 * OACONTROL requires some special handling for writes. We
+		 * want to make sure that any batch which enables OA also
+		 * disables it before the end of the batch. The goal is to
+		 * prevent one process from snooping on the perf data from
+		 * another process. To do that, we need to check the value
+		 * that will be written to the register. Hence, limit
+		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+		 */
+		if (reg_addr == OACONTROL) {
+			if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+				return false;
+
+			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+				*oacontrol_set = (cmd[2] != 0);
+		}
+
+		if (!valid_reg(ring->reg_table,
+			       ring->reg_count, reg_addr)) {
+			if (!is_master ||
+			    !valid_reg(ring->master_reg_table,
+				       ring->master_reg_count,
+				       reg_addr)) {
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+						 reg_addr,
+						 *cmd,
+						 ring->id);
+				return false;
+			}
+		}
+	}
+
+	if (desc->flags & CMD_DESC_BITMASK) {
+		int i;
+
+		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
+			u32 dword;
+
+			if (desc->bits[i].mask == 0)
+				break;
+
+			if (desc->bits[i].condition_mask != 0) {
+				u32 offset =
+					desc->bits[i].condition_offset;
+				u32 condition = cmd[offset] &
+					desc->bits[i].condition_mask;
+
+				if (condition == 0)
+					continue;
+			}
+
+			dword = cmd[desc->bits[i].offset] &
+				desc->bits[i].mask;
+
+			if (dword != desc->bits[i].expected) {
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+						 *cmd,
+						 desc->bits[i].mask,
+						 desc->bits[i].expected,
+						 dword, ring->id);
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
 #define LENGTH_BIAS 2
 
 /**
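A hedged aside (not part of the diff): the OACONTROL bookkeeping above
implies that a batch which enables the OA counters must also disable them
before it ends, so a well-formed batch looks roughly like this
(illustrative dword sequence):

	/* enable OA: LRI writes a non-zero value, *oacontrol_set becomes true */
	MI_LOAD_REGISTER_IMM(1), OACONTROL, <non-zero config>,
	/* ... commands that sample perf counters ... */
	/* disable OA: LRI writes zero, *oacontrol_set becomes false */
	MI_LOAD_REGISTER_IMM(1), OACONTROL, 0,
	MI_BATCH_BUFFER_END

If oacontrol_set is still true when parsing reaches the end of the batch,
i915_parse_cmds() below fails the batch with -EINVAL.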
@@ -361,6 +874,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 	u32 *cmd, *batch_base, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
 	int needs_clflush = 0;
+	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
 	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
 	if (ret) {
@@ -402,7 +916,7 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 		length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
 
 		if ((batch_end - cmd) < length) {
-			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
+			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
					 *cmd,
					 length,
					 (unsigned long)(batch_end - cmd));
@@ -410,68 +924,19 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 			break;
 		}
 
-		if (desc->flags & CMD_DESC_REJECT) {
-			DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
 		}
 
-		if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
-			DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
-					 *cmd);
-			ret = -EINVAL;
-			break;
-		}
-
-		if (desc->flags & CMD_DESC_REGISTER) {
-			u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
-			if (!valid_reg(ring->reg_table,
-				       ring->reg_count, reg_addr)) {
-				if (!is_master ||
-				    !valid_reg(ring->master_reg_table,
-					       ring->master_reg_count,
-					       reg_addr)) {
-					DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-							 reg_addr,
-							 *cmd,
-							 ring->id);
-					ret = -EINVAL;
-					break;
-				}
-			}
-		}
-
-		if (desc->flags & CMD_DESC_BITMASK) {
-			int i;
-
-			for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
-				u32 dword;
-
-				if (desc->bits[i].mask == 0)
-					break;
-
-				dword = cmd[desc->bits[i].offset] &
-					desc->bits[i].mask;
-
-				if (dword != desc->bits[i].expected) {
-					DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
-							 *cmd,
-							 desc->bits[i].mask,
-							 desc->bits[i].expected,
-							 dword, ring->id);
-					ret = -EINVAL;
-					break;
-				}
-			}
-
-			if (ret)
-				break;
-		}
-
 		cmd += length;
 	}
 
+	if (oacontrol_set) {
+		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+		ret = -EINVAL;
+	}
+
 	if (cmd >= batch_end) {
 		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 		ret = -EINVAL;
@@ -483,3 +948,22 @@ int i915_parse_cmds(struct intel_ring_buffer *ring,
 
 	return ret;
 }
+
+/**
+ * i915_cmd_parser_get_version() - get the cmd parser version number
+ *
+ * The cmd parser maintains a simple increasing integer version number suitable
+ * for passing to userspace clients to determine what operations are permitted.
+ *
+ * Return: the current version number of the cmd parser
+ */
+int i915_cmd_parser_get_version(void)
+{
+	/*
+	 * Command parser version history
+	 *
+	 * 1. Initial version. Checks batches and reports violations, but leaves
+	 *    hardware parsing enabled (so does not allow new use cases).
+	 */
+	return 1;
+}
@@ -966,7 +966,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
@@ -991,6 +991,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rpmodectl, rpinclimit, rpdeclimit;
 		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
@@ -1011,6 +1012,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			reqf >>= 25;
 		reqf *= GT_FREQUENCY_MULTIPLIER;
 
+		rpmodectl = I915_READ(GEN6_RP_CONTROL);
+		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
+		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
+
 		rpstat = I915_READ(GEN6_RPSTAT1);
 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -1027,14 +1032,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
+		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
+			   I915_READ(GEN6_PMIER),
+			   I915_READ(GEN6_PMIMR),
+			   I915_READ(GEN6_PMISR),
+			   I915_READ(GEN6_PMIIR),
+			   I915_READ(GEN6_PMINTRMSK));
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
-		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & 0xff00) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
@@ -1816,8 +1830,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
 			pdp <<= 32;
 			pdp |= I915_READ(ring->mmio_base + offset);
-			for (i = 0; i < 4; i++)
-				seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
 }
@@ -2044,7 +2057,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev)) {
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -3774,7 +3787,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
-	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+	{"i915_frequency_info", i915_frequency_info, 0},
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
@@ -1017,6 +1017,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
 		value = 1;
 		break;
+	case I915_PARAM_CMD_PARSER_VERSION:
+		value = i915_cmd_parser_get_version();
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
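A sketch of the userspace side (assuming libdrm's i915_drm.h definitions;
not part of the diff): clients can probe the new parameter and fall back
gracefully on older kernels, where the getparam returns -EINVAL.

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int i915_get_cmd_parser_version(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_CMD_PARSER_VERSION,
			.value = &value,
		};

		/* older kernels reject the parameter with -EINVAL */
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;
		return value;
	}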
@@ -891,7 +891,36 @@ static int i915_pm_poweroff(struct device *dev)
 	return i915_drm_freeze(drm_dev);
 }
 
-static int i915_runtime_suspend(struct device *device)
+static void snb_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_runtime_pm_disable_interrupts(dev);
+}
+
+static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+	hsw_enable_pc8(dev_priv);
+}
+
+static void snb_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	intel_runtime_pm_restore_interrupts(dev);
+	intel_init_pch_refclk(dev);
+	i915_gem_init_swizzling(dev);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	hsw_disable_pc8(dev_priv);
+}
+
+static int intel_runtime_suspend(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -902,8 +931,12 @@ static int intel_runtime_suspend(struct device *device)
 
 	DRM_DEBUG_KMS("Suspending device\n");
 
-	if (HAS_PC8(dev))
-		hsw_enable_pc8(dev_priv);
+	if (IS_GEN6(dev))
+		snb_runtime_suspend(dev_priv);
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_runtime_suspend(dev_priv);
+	else
+		WARN_ON(1);
 
 	i915_gem_release_all_mmaps(dev_priv);
 
@@ -923,7 +956,7 @@ static int intel_runtime_suspend(struct device *device)
 	return 0;
 }
 
-static int i915_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -936,8 +969,12 @@ static int intel_runtime_resume(struct device *device)
 	intel_opregion_notify_adapter(dev, PCI_D0);
 	dev_priv->pm.suspended = false;
 
-	if (HAS_PC8(dev))
-		hsw_disable_pc8(dev_priv);
+	if (IS_GEN6(dev))
+		snb_runtime_resume(dev_priv);
+	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_runtime_resume(dev_priv);
+	else
+		WARN_ON(1);
 
 	DRM_DEBUG_KMS("Device resumed\n");
 	return 0;
@@ -954,8 +991,8 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.poweroff = i915_pm_poweroff,
 	.restore_early = i915_pm_resume_early,
 	.restore = i915_pm_resume,
-	.runtime_suspend = i915_runtime_suspend,
-	.runtime_resume = i915_runtime_resume,
+	.runtime_suspend = intel_runtime_suspend,
+	.runtime_resume = intel_runtime_resume,
 };
 
 static const struct vm_operations_struct i915_gem_vm_ops = {
@@ -35,6 +35,7 @@
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
+#include "i915_gem_gtt.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -358,7 +359,7 @@ struct drm_i915_error_state {
 		u64 bbaddr;
 		u64 acthd;
 		u32 fault_reg;
-		u32 faddr;
+		u64 faddr;
 		u32 rc_psmi; /* sleep state */
 		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
 
@@ -572,168 +573,6 @@ enum i915_cache_level {
 	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 };
 
-typedef uint32_t gen6_gtt_pte_t;
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-	struct drm_mm_node node;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-
-	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
-
-	struct list_head vma_link; /* Link in the object's VMA list */
-
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
-	/**
-	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
-	/** Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page. */
-	void (*unbind_vma)(struct i915_vma *vma);
-	/* Map an object into an address space with the given cache flags. */
-#define GLOBAL_BIND (1<<0)
-	void (*bind_vma)(struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
-			 u32 flags);
-};
-
-struct i915_address_space {
-	struct drm_mm mm;
-	struct drm_device *dev;
-	struct list_head global_link;
-	unsigned long start; /* Start offset always 0 for dri2 */
-	size_t total; /* size addr space maps (ex. 2GB for ggtt) */
-
-	struct {
-		dma_addr_t addr;
-		struct page *page;
-	} scratch;
-
-	/**
-	 * List of objects currently involved in rendering.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
-
-	/**
-	 * LRU list of objects which are not in the ringbuffer and
-	 * are ready to unbind, but are still in the GTT.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is not held on the buffer while on this list,
-	 * as merely being GTT-bound shouldn't prevent its being
-	 * freed, and we'll pull it off the list in the free path.
-	 */
-	struct list_head inactive_list;
-
-	/* FIXME: Need a more generic return type */
-	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-				     enum i915_cache_level level,
-				     bool valid); /* Create a valid PTE */
-	void (*clear_range)(struct i915_address_space *vm,
-			    uint64_t start,
-			    uint64_t length,
-			    bool use_scratch);
-	void (*insert_entries)(struct i915_address_space *vm,
-			       struct sg_table *st,
-			       uint64_t start,
-			       enum i915_cache_level cache_level);
-	void (*cleanup)(struct i915_address_space *vm);
-};
-
-/* The Graphics Translation Table is the way in which GEN hardware translates a
- * Graphics Virtual Address into a Physical Address. In addition to the normal
- * collateral associated with any va->pa translations GEN hardware also has a
- * portion of the GTT which can be mapped by the CPU and remain both coherent
- * and correct (in cases like swizzling). That region is referred to as GMADR in
- * the spec.
- */
-struct i915_gtt {
-	struct i915_address_space base;
-	size_t stolen_size; /* Total size of stolen memory */
-
-	unsigned long mappable_end; /* End offset that we can CPU map */
-	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
-	phys_addr_t mappable_base; /* PA of our GMADR */
-
-	/** "Graphics Stolen Memory" holds the global PTEs */
-	void __iomem *gsm;
-
-	bool do_idle_maps;
-
-	int mtrr;
-
-	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
-			 size_t *stolen, phys_addr_t *mappable_base,
-			 unsigned long *mappable_end);
-};
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
-#define GEN8_LEGACY_PDPS 4
-struct i915_hw_ppgtt {
-	struct i915_address_space base;
-	struct kref ref;
-	struct drm_mm_node node;
-	unsigned num_pd_entries;
-	unsigned num_pd_pages; /* gen8+ */
-	union {
-		struct page **pt_pages;
-		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
-	};
-	struct page *pd_pages;
-	union {
-		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
-	};
-	union {
-		dma_addr_t *pt_dma_addr;
-		dma_addr_t *gen8_pt_dma_addr[4];
-	};
-
-	struct i915_hw_context *ctx;
-
-	int (*enable)(struct i915_hw_ppgtt *ppgtt);
-	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_ring_buffer *ring,
-			 bool synchronous);
-	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
-};
-
 struct i915_ctx_hang_stats {
 	/* This context had batch pending when hang was declared */
 	unsigned batch_pending;
@@ -794,6 +633,10 @@ struct i915_fbc {
 	} no_fbc_reason;
 };
 
+struct i915_drrs {
+	struct intel_connector *connector;
+};
+
 struct i915_psr {
 	bool sink_support;
 	bool source_ok;
@@ -1260,8 +1103,12 @@ struct i915_gpu_error {
 	 */
 	wait_queue_head_t reset_queue;
 
-	/* For gpu hang simulation. */
-	unsigned int stop_rings;
+	/* Userspace knobs for gpu hang simulation;
+	 * combines both a ring mask, and extra flags
+	 */
+	u32 stop_rings;
+#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
+#define I915_STOP_RING_ALLOW_WARN      (1 << 30)
 
 	/* For missed irq/seqno simulation. */
 	unsigned int test_irq_rings;
@@ -1281,6 +1128,12 @@ struct ddi_vbt_port_info {
 	uint8_t supports_dp:1;
 };
 
+enum drrs_support_type {
+	DRRS_NOT_SUPPORTED = 0,
+	STATIC_DRRS_SUPPORT = 1,
+	SEAMLESS_DRRS_SUPPORT = 2
+};
+
 struct intel_vbt_data {
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1296,6 +1149,8 @@ struct intel_vbt_data {
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
+	enum drrs_support_type drrs_type;
+
 	/* eDP */
 	int edp_rate;
 	int edp_lanes;
@@ -1315,6 +1170,12 @@ struct intel_vbt_data {
 	/* MIPI DSI */
 	struct {
 		u16 panel_id;
+		struct mipi_config *config;
+		struct mipi_pps_data *pps;
+		u8 seq_version;
+		u32 size;
+		u8 *data;
+		u8 *sequence[MIPI_SEQ_MAX];
 	} dsi;
 
 	int crt_ddc_pin;
@@ -1366,23 +1227,13 @@ struct ilk_wm_values {
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
-* case it happens, but if it actually happens we'll also update the variables
-* inside struct regsave so when we restore the IRQs they will contain the
-* latest expected values.
+* case it happens.
 *
 * For more, read the Documentation/power/runtime_pm.txt.
 */
 struct i915_runtime_pm {
 	bool suspended;
 	bool irqs_disabled;
-
-	struct {
-		uint32_t deimr;
-		uint32_t sdeimr;
-		uint32_t gtimr;
-		uint32_t gtier;
-		uint32_t gen6_pmimr;
-	} regsave;
 };
 
 enum intel_pipe_crc_source {
@@ -1415,7 +1266,7 @@ struct intel_pipe_crc {
 	wait_queue_head_t wq;
 };
 
-typedef struct drm_i915_private {
+struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
 
@@ -1484,6 +1335,7 @@ struct drm_i915_private {
 	struct timer_list hotplug_reenable_timer;
 
 	struct i915_fbc fbc;
+	struct i915_drrs drrs;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
 
@@ -1501,6 +1353,7 @@ struct drm_i915_private {
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
+	unsigned int vlv_cdclk_freq;
 
 	/**
 	 * wq - Driver workqueue for GEM.
@@ -1524,7 +1377,7 @@ struct drm_i915_private {
 	struct mutex modeset_restore_lock;
 
 	struct list_head vm_list; /* Global list of all address spaces */
-	struct i915_gtt gtt; /* VMA representing the global address space */
+	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
 
@@ -1620,7 +1473,7 @@ struct drm_i915_private {
 	struct i915_dri1_state dri1;
 	/* Old ums support infrastructure, same warning applies. */
 	struct i915_ums_state ums;
-} drm_i915_private_t;
+};
 
 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 {
@@ -1894,11 +1747,17 @@ struct drm_i915_cmd_descriptor {
 	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
+	 *
+	 * If the check specifies a non-zero condition_mask then the parser
+	 * only performs the check when the bits specified by condition_mask
+	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
+		u32 condition_offset;
+		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
 };
 
@@ -1940,8 +1799,9 @@ struct drm_i915_cmd_table {
 				 (dev)->pdev->device == 0x0106 || \
 				 (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
+#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
@@ -2022,8 +1882,8 @@ struct drm_i915_cmd_table {
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
-#define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
-#define HAS_RUNTIME_PM(dev)	(IS_HASWELL(dev))
+#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -2080,6 +1940,7 @@ struct i915_params {
	bool prefault_disable;
	bool reset;
	bool disable_display;
+	bool disable_vtd_wa;
 };
 extern struct i915_params i915 __read_mostly;
 
@@ -2302,6 +2163,18 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
 }
 
+static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
+}
+
+static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
+}
+
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
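A hedged reading of the new encoding (inferred only from the helpers
above): stop_rings now packs a ring mask in the low bits with two opt-in
flags in the high bits, so simulating a render-ring hang while keeping
the usual warning behaviour would look something like:

	u32 stop_rings = (1 << RCS) | I915_STOP_RING_ALLOW_WARN;

A value of 0 (no simulation) keeps both ban and warn enabled, matching
the previous behaviour.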
@@ -2466,23 +2339,12 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
-/* i915_gem_gtt.c */
-void i915_check_and_clear_faults(struct drm_device *dev);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-			       unsigned long mappable_end, unsigned long end);
-int i915_gem_gtt_init(struct drm_device *dev);
+/* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
 }
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
-bool intel_enable_ppgtt(struct drm_device *dev, bool full);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
@@ -2550,6 +2412,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
 const char *i915_cache_level_str(int type);
 
 /* i915_cmd_parser.c */
+int i915_cmd_parser_get_version(void);
 void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
 bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
 int i915_parse_cmds(struct intel_ring_buffer *ring,
@@ -2701,20 +2564,6 @@ void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
-void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
-void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
-
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
-	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
-	 ((reg) >= 0x5000 && (reg) < 0x8000) ||\
-	 ((reg) >= 0xB000 && (reg) < 0x12000) ||\
-	 ((reg) >= 0x2E000 && (reg) < 0x30000))
-
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
-	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
-	 ((reg) >= 0x22000 && (reg) < 0x24000) ||\
-	 ((reg) >= 0x30000 && (reg) < 0x40000))
-
 #define FORCEWAKE_RENDER	(1 << 0)
 #define FORCEWAKE_MEDIA	(1 << 1)
 #define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
@@ -2277,8 +2277,9 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
		if (!i915_gem_context_is_default(ctx)) {
			DRM_DEBUG("context hanging too fast, banning!\n");
			return true;
-		} else if (dev_priv->gpu_error.stop_rings == 0) {
-			DRM_ERROR("gpu hanging too fast, banning!\n");
+		} else if (i915_stop_ring_allow_ban(dev_priv)) {
+			if (i915_stop_ring_allow_warn(dev_priv))
+				DRM_ERROR("gpu hanging too fast, banning!\n");
			return true;
		}
	}
@@ -240,7 +240,15 @@ __create_hw_context(struct drm_device *dev,
		goto err_out;
	}
 
-	if (INTEL_INFO(dev)->gen >= 7) {
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
@@ -549,7 +557,7 @@ mi_set_context(struct intel_ring_buffer *ring,
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
-	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
@@ -559,8 +567,8 @@ mi_set_context(struct intel_ring_buffer *ring,
	if (ret)
		return ret;
 
-	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
-	if (IS_GEN7(ring->dev))
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw */
+	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);
@@ -578,7 +586,7 @@ mi_set_context(struct intel_ring_buffer *ring,
	 */
	intel_ring_emit(ring, MI_NOOP);
 
-	if (IS_GEN7(ring->dev))
+	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);
@@ -161,12 +161,8 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
-	int ret;
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return;
 
+	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;
@@ -1132,7 +1132,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
            mutex_unlock(&dev->struct_mutex);
            ret = PTR_ERR(ctx);
            goto pre_mutex_err;
        }
    }

    i915_gem_context_reference(ctx);

@@ -1142,6 +1142,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,

    eb = eb_create(args);
    if (eb == NULL) {
        i915_gem_context_unreference(ctx);
        mutex_unlock(&dev->struct_mutex);
        ret = -ENOMEM;
        goto pre_mutex_err;
@@ -55,59 +55,6 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
    return HAS_ALIASING_PPGTT(dev);
}

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in *
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)

#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))

/* GEN8 legacy style addressis defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 */
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

static void ppgtt_bind_vma(struct i915_vma *vma,
                           enum i915_cache_level cache_level,

@@ -187,9 +134,6 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
    return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level,
                                     bool valid)

@@ -1057,8 +1001,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
    struct drm_device *dev = ppgtt->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    bool retried = false;

@@ -1848,17 +1790,6 @@ static int ggtt_probe_common(struct drm_device *dev,
 * writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
{
#define GEN8_PPAT_UC		(0<<0)
#define GEN8_PPAT_WC		(1<<0)
#define GEN8_PPAT_WT		(2<<0)
#define GEN8_PPAT_WB		(3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC		(1<<2)
#define GEN8_PPAT_LLCELLC	(2<<2)
#define GEN8_PPAT_LLCeLLC	(3<<2)
#define GEN8_PPAT_AGE(x)	(x<<4)
#define GEN8_PPAT(i, x)		((uint64_t) (x) << ((i) * 8))
    uint64_t pat;

    pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
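GEN8_PPAT(i, x) simply places the 8-bit entry x into byte i of the 64-bit PAT value; a quick hand-check (mine, not from the patch):

/* GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC)
 *   = (uint64_t)(2 | (2<<2)) << (2 * 8)
 *   = 0xa << 16 = 0x000a0000,
 * i.e. PPAT entry 2 becomes write-through, LLC/eLLC cached. */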
@@ -2031,6 +1962,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
             gtt->base.total >> 20);
    DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
    DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
    if (intel_iommu_gfx_mapped)
        DRM_INFO("VT-d active for gfx access\n");
#endif

    return 0;
}
@@ -0,0 +1,283 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

#define I915_PPGTT_PT_ENTRIES		(PAGE_SIZE / sizeof(gen6_gtt_pte_t))
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)

#define GEN6_PPGTT_PD_ENTRIES		512
#define GEN6_PD_SIZE			(GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
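A quick sanity check of the cacheability encoding (mine, not part of the new header): the low three bits land in PTE bits 3:1 and the fourth bit moves to bit 11.

/* HSW_CACHEABILITY_CONTROL(0xb):
 *   (0xb & 0x7) << 1        = 0x3 << 1 = 0x06   -> PTE bits 3:1
 *   (0xb & 0x8) << (11 - 3) = 0x8 << 8 = 0x800  -> PTE bit 11
 * so HSW_WB_ELLC_LLC_AGE0 expands to 0x806. */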
/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 */
#define GEN8_PDPE_SHIFT			30
#define GEN8_PDPE_MASK			0x3
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPS		4
#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
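The shift/mask pairs decompose an address exactly as the diagram above describes; a small helper sketch (hypothetical, not in the patch):

/* Sketch: split a gen8 legacy-mode address into its table indices. */
static inline void gen8_addr_split(uint64_t addr,
                                   unsigned *pdpe, unsigned *pde, unsigned *pte)
{
    *pdpe = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK; /* bits 31:30 */
    *pde  = (addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK;   /* bits 29:21 */
    *pte  = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK;   /* bits 20:12 */
}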
#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))

enum i915_cache_level;
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an objects lifetime. So object refcounting should cover us.
 */
struct i915_vma {
    struct drm_mm_node node;
    struct drm_i915_gem_object *obj;
    struct i915_address_space *vm;

    /** This object's place on the active/inactive lists */
    struct list_head mm_list;

    struct list_head vma_link; /* Link in the object's VMA list */

    /** This vma's place in the batchbuffer or on the eviction list */
    struct list_head exec_list;

    /**
     * Used for performing relocations during execbuffer insertion.
     */
    struct hlist_node exec_node;
    unsigned long exec_handle;
    struct drm_i915_gem_exec_object2 *exec_entry;

    /**
     * How many users have pinned this object in GTT space. The following
     * users can each hold at most one reference: pwrite/pread, pin_ioctl
     * (via user_pin_count), execbuffer (objects are not allowed multiple
     * times for the same batchbuffer), and the framebuffer code. When
     * switching/pageflipping, the framebuffer code has at most two buffers
     * pinned per crtc.
     *
     * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
     * bits with absolutely no headroom. So use 4 bits. */
    unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

    /** Unmap an object from an address space. This usually consists of
     * setting the valid PTE entries to a reserved scratch page. */
    void (*unbind_vma)(struct i915_vma *vma);
    /* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
    void (*bind_vma)(struct i915_vma *vma,
                     enum i915_cache_level cache_level,
                     u32 flags);
};

struct i915_address_space {
    struct drm_mm mm;
    struct drm_device *dev;
    struct list_head global_link;
    unsigned long start;	/* Start offset always 0 for dri2 */
    size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

    struct {
        dma_addr_t addr;
        struct page *page;
    } scratch;

    /**
     * List of objects currently involved in rendering.
     *
     * Includes buffers having the contents of their GPU caches
     * flushed, not necessarily primitives. last_rendering_seqno
     * represents when the rendering involved will be completed.
     *
     * A reference is held on the buffer while on this list.
     */
    struct list_head active_list;

    /**
     * LRU list of objects which are not in the ringbuffer and
     * are ready to unbind, but are still in the GTT.
     *
     * last_rendering_seqno is 0 while an object is in this list.
     *
     * A reference is not held on the buffer while on this list,
     * as merely being GTT-bound shouldn't prevent its being
     * freed, and we'll pull it off the list in the free path.
     */
    struct list_head inactive_list;

    /* FIXME: Need a more generic return type */
    gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid); /* Create a valid PTE */
    void (*clear_range)(struct i915_address_space *vm,
                        uint64_t start,
                        uint64_t length,
                        bool use_scratch);
    void (*insert_entries)(struct i915_address_space *vm,
                           struct sg_table *st,
                           uint64_t start,
                           enum i915_cache_level cache_level);
    void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
    struct i915_address_space base;
    size_t stolen_size;		/* Total size of stolen memory */

    unsigned long mappable_end;	/* End offset that we can CPU map */
    struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
    phys_addr_t mappable_base;	/* PA of our GMADR */

    /** "Graphics Stolen Memory" holds the global PTEs */
    void __iomem *gsm;

    bool do_idle_maps;

    int mtrr;

    /* global gtt ops */
    int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
                     size_t *stolen, phys_addr_t *mappable_base,
                     unsigned long *mappable_end);
};

struct i915_hw_ppgtt {
    struct i915_address_space base;
    struct kref ref;
    struct drm_mm_node node;
    unsigned num_pd_entries;
    unsigned num_pd_pages; /* gen8+ */
    union {
        struct page **pt_pages;
        struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
    };
    struct page *pd_pages;
    union {
        uint32_t pd_offset;
        dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
    };
    union {
        dma_addr_t *pt_dma_addr;
        dma_addr_t *gen8_pt_dma_addr[4];
    };

    struct i915_hw_context *ctx;

    int (*enable)(struct i915_hw_ppgtt *ppgtt);
    int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
                     struct intel_ring_buffer *ring,
                     bool synchronous);
    void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
                               unsigned long mappable_end, unsigned long end);

bool intel_enable_ppgtt(struct drm_device *dev, bool full);
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

#endif
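As a reading aid, a sketch (mine, not code from this merge) of how the i915_address_space hooks compose: a bind implementation funnels through insert_entries, an unbind through clear_range. It assumes obj->pages is the object's sg_table, as in this era of the driver.

/* Sketch only: bind/unbind expressed via the vfuncs above. */
static void example_bind(struct i915_vma *vma, enum i915_cache_level level)
{
    vma->vm->insert_entries(vma->vm, vma->obj->pages,
                            vma->node.start, level);
}

static void example_unbind(struct i915_vma *vma, uint64_t size)
{
    vma->vm->clear_range(vma->vm, vma->node.start, size,
                         true /* point the range at the scratch page */);
}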
@@ -257,7 +257,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
        err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
    }
    err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
    err_printf(m, " FADDR: 0x%08x\n", ring->faddr);
    err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
               lower_32_bits(ring->faddr));
    if (INTEL_INFO(dev)->gen >= 6) {
        err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
        err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);

@@ -452,16 +453,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
            err_printf(m, "%s --- HW Context = 0x%08x\n",
                       dev_priv->ring[i].name,
                       obj->gtt_offset);
            offset = 0;
            for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
                err_printf(m, "[%04x] %08x %08x %08x %08x\n",
                           offset,
                           obj->pages[0][elt],
                           obj->pages[0][elt+1],
                           obj->pages[0][elt+2],
                           obj->pages[0][elt+3]);
                offset += 16;
            }
            print_error_obj(m, obj);
        }
    }

@@ -781,8 +773,10 @@ static void i915_record_ring_state(struct drm_device *dev,
        ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
        ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
        ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
        if (INTEL_INFO(dev)->gen >= 8)
        if (INTEL_INFO(dev)->gen >= 8) {
            ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
            ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
        }
        ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
    } else {
        ering->faddr = I915_READ(DMA_FADD_I8XX);

@@ -875,10 +869,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,

    list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
        if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
            ering->ctx = i915_error_object_create_sized(dev_priv,
                                                        obj,
                                                        &dev_priv->gtt.base,
                                                        1);
            ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
            break;
        }
    }
@@ -80,17 +80,64 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
    [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
    I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
    POSTING_READ(GEN8_##type##_IMR(which)); \
    I915_WRITE(GEN8_##type##_IER(which), 0); \
    I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
    POSTING_READ(GEN8_##type##_IIR(which)); \
    I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
    POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
    I915_WRITE(type##IMR, 0xffffffff); \
    POSTING_READ(type##IMR); \
    I915_WRITE(type##IER, 0); \
    I915_WRITE(type##IIR, 0xffffffff); \
    POSTING_READ(type##IIR); \
    I915_WRITE(type##IIR, 0xffffffff); \
    POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
    u32 val = I915_READ(reg); \
    if (val) { \
        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
             (reg), val); \
        I915_WRITE((reg), 0xffffffff); \
        POSTING_READ(reg); \
        I915_WRITE((reg), 0xffffffff); \
        POSTING_READ(reg); \
    } \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
    GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
    I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
    I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
    POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
    GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
    I915_WRITE(type##IMR, (imr_val)); \
    I915_WRITE(type##IER, (ier_val)); \
    POSTING_READ(type##IER); \
} while (0)
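To see what the token-pasting buys, here is a hand expansion of GEN5_IRQ_RESET(SDE) — the exact register sequence the old open-coded preinstall paths wrote:

/* GEN5_IRQ_RESET(SDE) expands (SDE##IMR etc.) to: */
I915_WRITE(SDEIMR, 0xffffffff);	/* mask everything */
POSTING_READ(SDEIMR);
I915_WRITE(SDEIER, 0);		/* disable all sources */
I915_WRITE(SDEIIR, 0xffffffff);	/* ack once... */
POSTING_READ(SDEIIR);
I915_WRITE(SDEIIR, 0xffffffff);	/* ...and again: IIR can queue two events */
POSTING_READ(SDEIIR);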
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
    assert_spin_locked(&dev_priv->irq_lock);

    if (dev_priv->pm.irqs_disabled) {
        WARN(1, "IRQs disabled\n");
        dev_priv->pm.regsave.deimr &= ~mask;
    if (WARN_ON(dev_priv->pm.irqs_disabled))
        return;
    }

    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;

@@ -104,11 +151,8 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
    assert_spin_locked(&dev_priv->irq_lock);

    if (dev_priv->pm.irqs_disabled) {
        WARN(1, "IRQs disabled\n");
        dev_priv->pm.regsave.deimr |= mask;
    if (WARN_ON(dev_priv->pm.irqs_disabled))
        return;
    }

    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;

@@ -129,13 +173,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
    assert_spin_locked(&dev_priv->irq_lock);

    if (dev_priv->pm.irqs_disabled) {
        WARN(1, "IRQs disabled\n");
        dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
        dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
                                       interrupt_mask);
    if (WARN_ON(dev_priv->pm.irqs_disabled))
        return;
    }

    dev_priv->gt_irq_mask &= ~interrupt_mask;
    dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);

@@ -167,13 +206,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,

    assert_spin_locked(&dev_priv->irq_lock);

    if (dev_priv->pm.irqs_disabled) {
        WARN(1, "IRQs disabled\n");
        dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
        dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
                                            interrupt_mask);
    if (WARN_ON(dev_priv->pm.irqs_disabled))
        return;
    }

    new_val = dev_priv->pm_irq_mask;
    new_val &= ~interrupt_mask;

@@ -313,14 +347,8 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,

    assert_spin_locked(&dev_priv->irq_lock);

    if (dev_priv->pm.irqs_disabled &&
        (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
        WARN(1, "IRQs disabled\n");
        dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
        dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
                                        interrupt_mask);
    if (WARN_ON(dev_priv->pm.irqs_disabled))
        return;
    }

    I915_WRITE(SDEIMR, sdeimr);
    POSTING_READ(SDEIMR);

@@ -503,8 +531,10 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,

    assert_spin_locked(&dev_priv->irq_lock);

    if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                     status_mask & ~PIPESTAT_INT_STATUS_MASK))
    if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask))
        return;

    if ((pipestat & enable_mask) == enable_mask)

@@ -527,8 +557,10 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,

    assert_spin_locked(&dev_priv->irq_lock);

    if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                     status_mask & ~PIPESTAT_INT_STATUS_MASK))
    if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask))
        return;

    if ((pipestat & enable_mask) == 0)

@@ -1619,6 +1651,33 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
        gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

    if (IS_G4X(dev)) {
        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
    } else {
        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
    }

    if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
        hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
        dp_aux_irq_handler(dev);

    I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
    /*
     * Make sure hotplug status is cleared before we clear IIR, or else we
     * may miss hotplug events.
     */
    POSTING_READ(PORT_HOTPLUG_STAT);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
    struct drm_device *dev = (struct drm_device *) arg;

@@ -1641,19 +1700,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
            valleyview_pipestat_irq_handler(dev, iir);

        /* Consume port. Then clear IIR or we'll miss events */
        if (iir & I915_DISPLAY_PORT_INTERRUPT) {
            u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
            u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

            intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

            if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                dp_aux_irq_handler(dev);

            I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
            I915_READ(PORT_HOTPLUG_STAT);
        }

        if (iir & I915_DISPLAY_PORT_INTERRUPT)
            i9xx_hpd_irq_handler(dev);

        if (pm_iir)
            gen6_rps_irq_handler(dev_priv, pm_iir);

@@ -2022,7 +2070,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
            if (pipe_iir & GEN8_PIPE_VBLANK)
                drm_handle_vblank(dev, pipe);

            if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
            if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
                intel_prepare_page_flip(dev, pipe);
                intel_finish_page_flip_plane(dev, pipe);
            }

@@ -2511,6 +2559,56 @@ ring_idle(struct intel_ring_buffer *ring, u32 seqno)
            i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
    if (INTEL_INFO(dev)->gen >= 8) {
        /*
         * FIXME: gen8 semaphore support - currently we don't emit
         * semaphores on bdw anyway, but this needs to be addressed when
         * we merge that code.
         */
        return false;
    } else {
        ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
        return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
                         MI_SEMAPHORE_REGISTER);
    }
}

static struct intel_ring_buffer *
semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
{
    struct drm_i915_private *dev_priv = ring->dev->dev_private;
    struct intel_ring_buffer *signaller;
    int i;

    if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
        /*
         * FIXME: gen8 semaphore support - currently we don't emit
         * semaphores on bdw anyway, but this needs to be addressed when
         * we merge that code.
         */
        return NULL;
    } else {
        u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

        for_each_ring(signaller, dev_priv, i) {
            if(ring == signaller)
                continue;

            if (sync_bits ==
                signaller->semaphore_register[ring->id])
                return signaller;
        }
    }

    DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
              ring->id, ipehr);

    return NULL;
}

static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{

@@ -2519,8 +2617,7 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
    int i;

    ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
    if ((ipehr & ~(0x3 << 16)) !=
        (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
    if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
        return NULL;

    /*

@@ -2552,7 +2649,7 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
        return NULL;

    *seqno = ioread32(ring->virtual_start + head + 4) + 1;
    return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
    return semaphore_wait_to_signaller_ring(ring, ipehr);
}

static int semaphore_passed(struct intel_ring_buffer *ring)

@@ -2759,57 +2856,68 @@ void i915_queue_hangcheck(struct drm_device *dev)
               round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_preinstall(struct drm_device *dev)
static void ibx_irq_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (HAS_PCH_NOP(dev))
        return;

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    /*
     * SDEIER is also touched by the interrupt handler to work around missed
     * PCH interrupts. Hence we can't update it after the interrupt handler
     * is enabled - instead we unconditionally enable all PCH interrupt
     * sources here, but then only unmask them as needed with SDEIMR.
     */
    GEN5_IRQ_RESET(SDE);

    if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
        I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (HAS_PCH_NOP(dev))
        return;

    WARN_ON(I915_READ(SDEIER) != 0);
    I915_WRITE(SDEIER, 0xffffffff);
    POSTING_READ(SDEIER);
}

static void gen5_gt_irq_preinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    if (INTEL_INFO(dev)->gen >= 6) {
        /* and PM */
        I915_WRITE(GEN6_PMIMR, 0xffffffff);
        I915_WRITE(GEN6_PMIER, 0x0);
        POSTING_READ(GEN6_PMIER);
    }
    GEN5_IRQ_RESET(GT);
    if (INTEL_INFO(dev)->gen >= 6)
        GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
static void ironlake_irq_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    I915_WRITE(HWSTAM, 0xeffe);
    I915_WRITE(HWSTAM, 0xffffffff);

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);
    GEN5_IRQ_RESET(DE);
    if (IS_GEN7(dev))
        I915_WRITE(GEN7_ERR_INT, 0xffffffff);

    gen5_gt_irq_preinstall(dev);
    gen5_gt_irq_reset(dev);

    ibx_irq_preinstall(dev);
    ibx_irq_reset(dev);
}

static void ironlake_irq_preinstall(struct drm_device *dev)
{
    ironlake_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)

@@ -2827,7 +2935,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIIR, I915_READ(GTIIR));

    gen5_gt_irq_preinstall(dev);
    gen5_gt_irq_reset(dev);

    I915_WRITE(DPINVGTT, 0xff);

@@ -2841,7 +2949,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
    POSTING_READ(VLV_IER);
}

static void gen8_irq_preinstall(struct drm_device *dev)
static void gen8_irq_reset(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

@@ -2849,43 +2957,24 @@ static void gen8_irq_preinstall(struct drm_device *dev)
    I915_WRITE(GEN8_MASTER_IRQ, 0);
    POSTING_READ(GEN8_MASTER_IRQ);

    /* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
    } while (0)
    GEN8_IRQ_RESET_NDX(GT, 0);
    GEN8_IRQ_RESET_NDX(GT, 1);
    GEN8_IRQ_RESET_NDX(GT, 2);
    GEN8_IRQ_RESET_NDX(GT, 3);

#define GEN8_IRQ_INIT(type) do { \
        I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR); \
        I915_WRITE(GEN8_##type##_IER, 0); \
        I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR); \
        I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
    } while (0)
    for_each_pipe(pipe)
        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

    GEN8_IRQ_INIT_NDX(GT, 0);
    GEN8_IRQ_INIT_NDX(GT, 1);
    GEN8_IRQ_INIT_NDX(GT, 2);
    GEN8_IRQ_INIT_NDX(GT, 3);
    GEN5_IRQ_RESET(GEN8_DE_PORT_);
    GEN5_IRQ_RESET(GEN8_DE_MISC_);
    GEN5_IRQ_RESET(GEN8_PCU_);

    for_each_pipe(pipe) {
        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
    }
    ibx_irq_reset(dev);
}

    GEN8_IRQ_INIT(DE_PORT);
    GEN8_IRQ_INIT(DE_MISC);
    GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX

    POSTING_READ(GEN8_PCU_IIR);

    ibx_irq_preinstall(dev);
static void gen8_irq_preinstall(struct drm_device *dev)
{
    gen8_irq_reset(dev);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)

@@ -2931,15 +3020,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
    if (HAS_PCH_NOP(dev))
        return;

    if (HAS_PCH_IBX(dev)) {
    if (HAS_PCH_IBX(dev))
        mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
    } else {
    else
        mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

        I915_WRITE(SERR_INT, I915_READ(SERR_INT));
    }

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
    I915_WRITE(SDEIMR, ~mask);
}

@@ -2965,10 +3051,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
    }

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    I915_WRITE(GTIER, gt_irqs);
    POSTING_READ(GTIER);
    GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

    if (INTEL_INFO(dev)->gen >= 6) {
        pm_irqs |= dev_priv->pm_rps_events;

@@ -2977,10 +3060,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
            pm_irqs |= PM_VEBOX_USER_INTERRUPT;

        dev_priv->pm_irq_mask = 0xffffffff;
        I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
        I915_WRITE(GEN6_PMIER, pm_irqs);
        POSTING_READ(GEN6_PMIER);
        GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
    }
}

@@ -2997,8 +3077,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
                        DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
        extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
                      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);

        I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
    } else {
        display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                        DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |

@@ -3011,11 +3089,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev)

    dev_priv->irq_mask = ~display_mask;

    /* should always can generate irq */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | extra_mask);
    POSTING_READ(DEIER);
    I915_WRITE(HWSTAM, 0xeffe);

    ibx_irq_pre_postinstall(dev);

    GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

    gen5_gt_irq_postinstall(dev);

@@ -3175,21 +3253,14 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
        };

    for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
        u32 tmp = I915_READ(GEN8_GT_IIR(i));
        if (tmp)
            DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
                      i, tmp);
        I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
        I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
    }
    POSTING_READ(GEN8_GT_IER(0));
    for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
        GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
    struct drm_device *dev = dev_priv->dev;
    uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
    uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
        GEN8_PIPE_CDCLK_CRC_DONE |
        GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
    uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |

@@ -3199,25 +3270,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
    dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
    dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

    for_each_pipe(pipe) {
        u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
        if (tmp)
            DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
                      pipe, tmp);
        I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
        I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
    }
    POSTING_READ(GEN8_DE_PIPE_ISR(0));
    for_each_pipe(pipe)
        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
                          de_pipe_enables);

    I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
    I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
    POSTING_READ(GEN8_DE_PORT_IER);
    GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    ibx_irq_pre_postinstall(dev);

    gen8_gt_irq_postinstall(dev_priv);
    gen8_de_irq_postinstall(dev_priv);

@@ -3232,41 +3297,13 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static void gen8_irq_uninstall(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe;

    if (!dev_priv)
        return;

    I915_WRITE(GEN8_MASTER_IRQ, 0);
    intel_hpd_irq_uninstall(dev_priv);

#define GEN8_IRQ_FINI_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
    } while (0)

#define GEN8_IRQ_FINI(type) do { \
        I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
        I915_WRITE(GEN8_##type##_IER, 0); \
        I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
    } while (0)

    GEN8_IRQ_FINI_NDX(GT, 0);
    GEN8_IRQ_FINI_NDX(GT, 1);
    GEN8_IRQ_FINI_NDX(GT, 2);
    GEN8_IRQ_FINI_NDX(GT, 3);

    for_each_pipe(pipe) {
        GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
    }

    GEN8_IRQ_FINI(DE_PORT);
    GEN8_IRQ_FINI(DE_MISC);
    GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

    POSTING_READ(GEN8_PCU_IIR);
    gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)

@@ -3309,26 +3346,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)

    intel_hpd_irq_uninstall(dev_priv);

    I915_WRITE(HWSTAM, 0xffffffff);

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    if (IS_GEN7(dev))
        I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    I915_WRITE(GTIIR, I915_READ(GTIIR));

    if (HAS_PCH_NOP(dev))
        return;

    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
        I915_WRITE(SERR_INT, I915_READ(SERR_INT));
    ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)

@@ -3636,16 +3654,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
            break;

        /* Consume port. Then clear IIR or we'll miss events */
        if ((I915_HAS_HOTPLUG(dev)) &&
            (iir & I915_DISPLAY_PORT_INTERRUPT)) {
            u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
            u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

            intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

            I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
            POSTING_READ(PORT_HOTPLUG_STAT);
        }
        if (I915_HAS_HOTPLUG(dev) &&
            iir & I915_DISPLAY_PORT_INTERRUPT)
            i9xx_hpd_irq_handler(dev);

        I915_WRITE(IIR, iir & ~flip_mask);
        new_iir = I915_READ(IIR); /* Flush posted writes */

@@ -3879,22 +3890,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
            ret = IRQ_HANDLED;

        /* Consume port. Then clear IIR or we'll miss events */
        if (iir & I915_DISPLAY_PORT_INTERRUPT) {
            u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
            u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
                                                    HOTPLUG_INT_STATUS_G4X :
                                                    HOTPLUG_INT_STATUS_I915);

            intel_hpd_irq_handler(dev, hotplug_trigger,
                                  IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

            if (IS_G4X(dev) &&
                (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
                dp_aux_irq_handler(dev);

            I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
            I915_READ(PORT_HOTPLUG_STAT);
        }
        if (iir & I915_DISPLAY_PORT_INTERRUPT)
            i9xx_hpd_irq_handler(dev);

        I915_WRITE(IIR, iir & ~flip_mask);
        new_iir = I915_READ(IIR); /* Flush posted writes */

@@ -4121,57 +4118,20 @@ void intel_hpd_init(struct drm_device *dev)
}

/* Disable interrupts so we can allow runtime PM. */
void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long irqflags;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

    dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
    dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
    dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
    dev_priv->pm.regsave.gtier = I915_READ(GTIER);
    dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

    ironlake_disable_display_irq(dev_priv, 0xffffffff);
    ibx_disable_display_interrupt(dev_priv, 0xffffffff);
    ilk_disable_gt_irq(dev_priv, 0xffffffff);
    snb_disable_pm_irq(dev_priv, 0xffffffff);

    dev->driver->irq_uninstall(dev);
    dev_priv->pm.irqs_disabled = true;

    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from runtime PM. */
void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long irqflags;
    uint32_t val;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

    val = I915_READ(DEIMR);
    WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

    val = I915_READ(SDEIMR);
    WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

    val = I915_READ(GTIMR);
    WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

    val = I915_READ(GEN6_PMIMR);
    WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

    dev_priv->pm.irqs_disabled = false;

    ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
    ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
    ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
    snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
    I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);

    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    dev->driver->irq_preinstall(dev);
    dev->driver->irq_postinstall(dev);
}
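The rework routes runtime-PM interrupt handling through the driver's normal install/uninstall hooks instead of hand-saving IMR registers. A hedged sketch of the resulting call pattern from a runtime-PM path (the suspend/resume function names below are illustrative, not from the patch):

/* Sketch only: how a runtime-suspend/resume pair would use these. */
static int example_runtime_suspend(struct drm_device *dev)
{
    intel_runtime_pm_disable_interrupts(dev);	/* full irq_uninstall */
    /* ... power the device down ... */
    return 0;
}

static int example_runtime_resume(struct drm_device *dev)
{
    /* ... power the device back up ... */
    intel_runtime_pm_restore_interrupts(dev);	/* preinstall + postinstall */
    return 0;
}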
@@ -46,7 +46,8 @@ struct i915_params i915 __read_mostly = {
    .reset = true,
    .invert_brightness = 0,
    .disable_display = 0,
    .enable_cmd_parser = 0,
    .enable_cmd_parser = 1,
    .disable_vtd_wa = 0,
};

module_param_named(modeset, i915.modeset, int, 0400);

@@ -149,6 +150,9 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");

module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");

module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
                 "Enable command parsing (1=enabled, 0=disabled [default])");
                 "Enable command parsing (1=enabled [default], 0=disabled)");
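With the default flipped to enforcing, anyone who needs the old behaviour can still turn the parser off at load time in the usual module-parameter way, e.g.:

    modprobe i915 enable_cmd_parser=0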
@@ -190,6 +190,8 @@
 * Memory interface instructions used by the kernel
 */
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
#define MI_GLOBAL_GTT    (1<<22)

#define MI_NOOP			MI_INSTR(0, 0)
#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)

@@ -244,7 +246,8 @@
#define   MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
#define   MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
#define   MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
#define   MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define   MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define   MI_SEMAPHORE_SYNC_MASK    (3<<16)
#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
#define   MI_MM_SPACE_GTT		(1<<8)
#define   MI_MM_SPACE_PHYSICAL		(0<<8)

@@ -262,13 +265,16 @@
 * - One can actually load arbitrary many arbitrary registers: Simply issue x
 *   address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
 */
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
#define   MI_INVALIDATE_TLB		(1<<18)
#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
#define   MI_FLUSH_DW_OP_MASK		(3<<14)
#define   MI_FLUSH_DW_NOTIFY		(1<<8)
#define   MI_INVALIDATE_BSD		(1<<7)
#define   MI_FLUSH_DW_USE_GTT		(1<<2)
#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
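The added parentheses around (x) matter once callers pass expressions rather than constants. A quick emission sketch (mine, not from the patch) for one address/value pair:

/* MI_LOAD_REGISTER_IMM(1) = MI_INSTR(0x22, 2*1-1): header plus one
 * register/value pair, three dwords total. */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, OACONTROL);	/* register offset */
intel_ring_emit(ring, 0);		/* value to load */
intel_ring_emit(ring, MI_NOOP);		/* pad to an even dword count */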
@ -330,9 +336,12 @@
|
|||
#define DISPLAY_PLANE_B (1<<20)
|
||||
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
|
||||
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
|
||||
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
|
||||
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
|
||||
#define PIPE_CONTROL_CS_STALL (1<<20)
|
||||
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
|
||||
#define PIPE_CONTROL_QW_WRITE (1<<14)
|
||||
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
|
||||
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
|
||||
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
|
||||
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
|
||||
|
@ -347,6 +356,94 @@
|
|||
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
|
||||
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
|
||||
|
||||
/*
|
||||
* Commands used only by the command parser
|
||||
*/
|
||||
#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
|
||||
#define MI_ARB_CHECK MI_INSTR(0x05, 0)
|
||||
#define MI_RS_CONTROL MI_INSTR(0x06, 0)
|
||||
#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
|
||||
#define MI_PREDICATE MI_INSTR(0x0C, 0)
|
||||
#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
|
||||
#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
|
||||
#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
|
||||
#define MI_URB_CLEAR MI_INSTR(0x19, 0)
|
||||
#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
|
||||
#define MI_CLFLUSH MI_INSTR(0x27, 0)
|
||||
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
|
||||
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
|
||||
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
|
||||
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
|
||||
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
|
||||
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
|
||||
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
|
||||
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
|
||||
|
||||
#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
|
||||
#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
|
||||
#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
|
||||
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
|
||||
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
|
||||
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
|
||||
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
|
||||
#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
|
||||
#define GFX_OP_3DSTATE_SO_DECL_LIST \
|
||||
((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
|
||||
|
||||
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
|
||||
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
|
||||
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
|
||||
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
|
||||
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
|
||||
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
|
||||
|
||||
#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
|
||||
|
||||
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
|
||||
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))

/*
 * Registers used only by the command parser
 */
#define BCS_SWCTRL 0x22200

#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define IA_VERTICES_COUNT 0x2310
#define IA_PRIMITIVES_COUNT 0x2318
#define VS_INVOCATION_COUNT 0x2320
#define GS_INVOCATION_COUNT 0x2328
#define GS_PRIMITIVES_COUNT 0x2330
#define CL_INVOCATION_COUNT 0x2338
#define CL_PRIMITIVES_COUNT 0x2340
#define PS_INVOCATION_COUNT 0x2348
#define PS_DEPTH_COUNT 0x2350

/* There are four 64-bit counter registers, one for each stream output */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)

#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)

#define GEN7_3DPRIM_END_OFFSET 0x2420
#define GEN7_3DPRIM_START_VERTEX 0x2430
#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440

#define OACONTROL 0x2360

#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
                                         _GEN7_PIPEA_DE_LOAD_SL, \
                                         _GEN7_PIPEB_DE_LOAD_SL)

/*
 * Reset registers
@@ -748,6 +845,7 @@ enum punit_power_well {
#define RING_INSTDONE(base) ((base)+0x6c)
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
#define RING_INSTPM(base) ((base)+0xc0)
#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
@@ -842,7 +940,7 @@ enum punit_power_well {
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
#define GFX_REPLAY_MODE (1<<11)
#define GFX_PSMI_GRANULARITY (1<<10)
@@ -973,6 +1071,7 @@ enum punit_power_well {
#define ECO_FLIP_DONE (1<<0)

#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
#define RC_OP_FLUSH_ENABLE (1<<0)
#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
@@ -3258,6 +3357,7 @@ enum punit_power_well {
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
@@ -3535,9 +3635,9 @@ enum punit_power_well {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70040)
#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x70044)
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
#define _PIPEA_FRMCOUNT_GM45 0x70040
#define _PIPEA_FLIPCOUNT_GM45 0x70044
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)

/* Cursor A & B regs */
#define _CURACNTR (dev_priv->info.display_mmio_offset + 0x70080)
@@ -4120,7 +4220,7 @@ enum punit_power_well {
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
#define GEN8_PIPE_FLIP_DONE (1 << 4)
#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
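As a quick illustration of the GEN7_SO_* stream-output counter macros earlier in this hunk, a standalone sketch (the #define is copied from the hunk; everything else is illustrative):

#include <stdio.h>

/* Copied from the hunk above: four 64-bit counters spaced 8 bytes apart. */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)

int main(void)
{
    for (int n = 0; n < 4; n++)
        printf("stream %d -> 0x%04x\n", n, GEN7_SO_NUM_PRIMS_WRITTEN(n));
    return 0; /* prints 0x5200, 0x5208, 0x5210, 0x5218 */
}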
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -206,7 +206,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
    const struct lvds_dvo_timing *panel_dvo_timing;
    const struct lvds_fp_timing *fp_timing;
    struct drm_display_mode *panel_fixed_mode;
    int i, downclock;
    int i, downclock, drrs_mode;

    lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
    if (!lvds_options)
@@ -218,6 +218,28 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,

    panel_type = lvds_options->panel_type;

    drrs_mode = (lvds_options->dps_panel_type_bits
                >> (panel_type * 2)) & MODE_MASK;
    /*
     * VBT has static DRRS = 0 and seamless DRRS = 2.
     * The below piece of code is required to adjust vbt.drrs_type
     * to match the enum drrs_support_type.
     */
    switch (drrs_mode) {
    case 0:
        dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
        DRM_DEBUG_KMS("DRRS supported mode is static\n");
        break;
    case 2:
        dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
        DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
        break;
    default:
        dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
        DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
        break;
    }

    lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
    if (!lvds_lfp_data)
        return;
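To make the dps_panel_type_bits decoding above concrete, a minimal standalone sketch (MODE_MASK matches the new #define in intel_bios.h; the sample value is made up):

#include <stdint.h>
#include <stdio.h>

#define MODE_MASK 0x3 /* two DRRS mode bits per panel, as in intel_bios.h */

/* Panel 0 lives in bits 1:0, panel 1 in bits 3:2, and so on. */
static unsigned int drrs_mode_for_panel(uint32_t dps_panel_type_bits,
                                        unsigned int panel_type)
{
    return (dps_panel_type_bits >> (panel_type * 2)) & MODE_MASK;
}

int main(void)
{
    uint32_t bits = 0x24; /* made-up VBT word: panel 1 -> 1, panel 2 -> 2 */

    /* 2 maps to SEAMLESS_DRRS_SUPPORT in the switch above */
    printf("panel 2 DRRS mode: %u\n", drrs_mode_for_panel(bits, 2));
    return 0;
}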
@@ -526,6 +548,16 @@ parse_driver_features(struct drm_i915_private *dev_priv,

    if (driver->dual_frequency)
        dev_priv->render_reclock_avail = true;

    DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
    /*
     * If DRRS is not supported, drrs_type has to be set to 0.
     * This is because the VBT is configured in such a way that
     * static DRRS is 0 and DRRS not supported is represented by
     * driver->drrs_enabled=false
     */
    if (!driver->drrs_enabled)
        dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
}

static void
@@ -604,19 +636,217 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
    }
}

static u8 *goto_next_sequence(u8 *data, int *size)
{
    u16 len;
    int tmp = *size;

    if (--tmp < 0)
        return NULL;

    /* goto first element */
    data++;
    while (1) {
        switch (*data) {
        case MIPI_SEQ_ELEM_SEND_PKT:
            /*
             * skip by this element payload size
             * skip elem id, command flag and data type
             */
            tmp -= 5;
            if (tmp < 0)
                return NULL;

            data += 3;
            len = *((u16 *)data);

            tmp -= len;
            if (tmp < 0)
                return NULL;

            /* skip by len */
            data = data + 2 + len;
            break;
        case MIPI_SEQ_ELEM_DELAY:
            /* skip by elem id, and delay is 4 bytes */
            tmp -= 5;
            if (tmp < 0)
                return NULL;

            data += 5;
            break;
        case MIPI_SEQ_ELEM_GPIO:
            tmp -= 3;
            if (tmp < 0)
                return NULL;

            data += 3;
            break;
        default:
            DRM_ERROR("Unknown element\n");
            return NULL;
        }

        /* end of sequence ? */
        if (*data == 0)
            break;
    }

    /* goto next sequence or end of block byte */
    if (--tmp < 0)
        return NULL;

    data++;

    /* update amount of data left for the sequence block to be parsed */
    *size = tmp;
    return data;
}
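For reference, the element framing that goto_next_sequence() steps over looks roughly like this; the layout is inferred from the byte arithmetic above, not quoted from a VBT specification:

/*
 * MIPI_SEQ_ELEM_SEND_PKT: [elem id:1][cmd flag:1][data type:1][len:2 LE][payload:len]
 * MIPI_SEQ_ELEM_DELAY:    [elem id:1][delay:4]
 * MIPI_SEQ_ELEM_GPIO:     [elem id:1][2 more bytes, presumably gpio index + value]
 *
 * A 0x00 where the next element id would sit terminates the sequence, which
 * is why the function peeks at *data after each skip.
 */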
static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
    struct bdb_mipi *mipi;
    struct bdb_mipi_config *start;
    struct bdb_mipi_sequence *sequence;
    struct mipi_config *config;
    struct mipi_pps_data *pps;
    u8 *data, *seq_data;
    int i, panel_id, seq_size;
    u16 block_size;

    mipi = find_section(bdb, BDB_MIPI_CONFIG);
    if (!mipi) {
        DRM_DEBUG_KMS("No MIPI BDB found");
    /* Initialize this to undefined indicating no generic MIPI support */
    dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;

    /* Block #40 is already parsed and panel_fixed_mode is
     * stored in dev_priv->lfp_lvds_vbt_mode
     * reuse this when needed
     */

    /* Parse #52 for panel index used from panel_type already
     * parsed
     */
    start = find_section(bdb, BDB_MIPI_CONFIG);
    if (!start) {
        DRM_DEBUG_KMS("No MIPI config BDB found");
        return;
    }

    /* XXX: add more info */
    DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
                     panel_type);

    /*
     * get hold of the correct configuration block and pps data as per
     * the panel_type as index
     */
    config = &start->config[panel_type];
    pps = &start->pps[panel_type];

    /* store as of now full data. Trim when we realise all is not needed */
    dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
    if (!dev_priv->vbt.dsi.config)
        return;

    dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
    if (!dev_priv->vbt.dsi.pps) {
        kfree(dev_priv->vbt.dsi.config);
        return;
    }

    /* We have mandatory mipi config blocks. Initialize as generic panel */
    dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;

    /* Check if we have sequence block as well */
    sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
    if (!sequence) {
        DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
        return;
    }

    DRM_DEBUG_DRIVER("Found MIPI sequence block\n");

    block_size = get_blocksize(sequence);

    /*
     * parse the sequence block for individual sequences
     */
    dev_priv->vbt.dsi.seq_version = sequence->version;

    seq_data = &sequence->data[0];

    /*
     * sequence block is variable length and hence we need to parse and
     * get the sequence data for specific panel id
     */
    for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
        panel_id = *seq_data;
        seq_size = *((u16 *) (seq_data + 1));
        if (panel_id == panel_type)
            break;

        /* skip the sequence including seq header of 3 bytes */
        seq_data = seq_data + 3 + seq_size;
        if ((seq_data - &sequence->data[0]) > block_size) {
            DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
            return;
        }
    }

    if (i == MAX_MIPI_CONFIGURATIONS) {
        DRM_ERROR("Sequence block detected but no valid configuration\n");
        return;
    }

    /* check if found sequence is completely within the sequence block
     * just being paranoid */
    if (seq_size > block_size) {
        DRM_ERROR("Corrupted sequence/size, bailing out\n");
        return;
    }

    /* skip the panel id(1 byte) and seq size(2 bytes) */
    dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
    if (!dev_priv->vbt.dsi.data)
        return;

    /*
     * loop into the sequence data and split into multiple sequences
     * There are only 5 types of sequences as of now
     */
    data = dev_priv->vbt.dsi.data;
    dev_priv->vbt.dsi.size = seq_size;

    /* two consecutive 0x00 indicate end of all sequences */
    while (1) {
        int seq_id = *data;
        if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
            dev_priv->vbt.dsi.sequence[seq_id] = data;
            DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
        } else {
            DRM_ERROR("undefined sequence\n");
            goto err;
        }

        /* partial parsing to skip elements */
        data = goto_next_sequence(data, &seq_size);

        if (data == NULL) {
            DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
            goto err;
        }

        if (*data == 0)
            break; /* end of sequence reached */
    }

    DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
    return;
err:
    kfree(dev_priv->vbt.dsi.data);
    dev_priv->vbt.dsi.data = NULL;

    /* error during parsing so set all pointers to null
     * because of partial parsing */
    memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
}

static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
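A minimal standalone sketch of the per-panel record walk that the parse_mipi() loop above performs; the 1-byte id plus 2-byte little-endian size framing is taken from the code, and the helper name is hypothetical:

#include <stdint.h>
#include <stddef.h>

/* Returns a pointer to the matching panel's sequence data, or NULL. */
static const uint8_t *find_panel_sequence(const uint8_t *seq_data,
                                          size_t block_size,
                                          int panel_type, uint16_t *seq_size)
{
    const uint8_t *end = seq_data + block_size;

    while (seq_data + 3 <= end) {
        int panel_id = seq_data[0];
        uint16_t size = seq_data[1] | (seq_data[2] << 8);

        if (panel_id == panel_type) {
            *seq_size = size;
            return seq_data + 3; /* skip the 3-byte header */
        }
        seq_data += 3 + size;    /* next per-panel record */
    }
    return NULL;
}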
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -282,6 +282,9 @@ struct bdb_general_definitions {
    union child_device_config devices[0];
} __packed;

/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
#define MODE_MASK 0x3

struct bdb_lvds_options {
    u8 panel_type;
    u8 rsvd1;
@@ -294,6 +297,18 @@ struct bdb_lvds_options {
    u8 lvds_edid:1;
    u8 rsvd2:1;
    u8 rsvd4;
    /* LVDS Panel channel bits stored here */
    u32 lvds_panel_channel_bits;
    /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
    u16 ssc_bits;
    u16 ssc_freq;
    u16 ssc_ddt;
    /* Panel color depth defined here */
    u16 panel_color_depth;
    /* LVDS panel type bits stored here */
    u32 dps_panel_type_bits;
    /* LVDS backlight control type bits stored here */
    u32 blt_control_type_bits;
} __packed;

/* LFP pointer table contains entries to the struct below */
@@ -482,6 +497,20 @@ struct bdb_driver_features {

    u8 hdmi_termination;
    u8 custom_vbt_version;
    /* Driver features data block */
    u16 rmpm_enabled:1;
    u16 s2ddt_enabled:1;
    u16 dpst_enabled:1;
    u16 bltclt_enabled:1;
    u16 adb_enabled:1;
    u16 drrs_enabled:1;
    u16 grs_enabled:1;
    u16 gpmt_enabled:1;
    u16 tbt_enabled:1;
    u16 psr_enabled:1;
    u16 ips_enabled:1;
    u16 reserved3:4;
    u16 pc_feature_valid:1;
} __packed;

#define EDP_18BPP 0
@@ -870,4 +899,35 @@ struct bdb_mipi_sequence {
    u8 data[0];
};

/* MIPI Sequence Block definitions */
enum mipi_seq {
    MIPI_SEQ_UNDEFINED = 0,
    MIPI_SEQ_ASSERT_RESET,
    MIPI_SEQ_INIT_OTP,
    MIPI_SEQ_DISPLAY_ON,
    MIPI_SEQ_DISPLAY_OFF,
    MIPI_SEQ_DEASSERT_RESET,
    MIPI_SEQ_MAX
};

enum mipi_seq_element {
    MIPI_SEQ_ELEM_UNDEFINED = 0,
    MIPI_SEQ_ELEM_SEND_PKT,
    MIPI_SEQ_ELEM_DELAY,
    MIPI_SEQ_ELEM_GPIO,
    MIPI_SEQ_ELEM_STATUS,
    MIPI_SEQ_ELEM_MAX
};

enum mipi_gpio_pin_index {
    MIPI_GPIO_UNDEFINED = 0,
    MIPI_GPIO_PANEL_ENABLE,
    MIPI_GPIO_BL_ENABLE,
    MIPI_GPIO_PWM_ENABLE,
    MIPI_GPIO_RESET_N,
    MIPI_GPIO_PWR_DOWN_R,
    MIPI_GPIO_STDBY_RST_N,
    MIPI_GPIO_MAX
};

#endif /* _I830_BIOS_H_ */
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -765,7 +765,7 @@ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
    frame = I915_READ(frame_reg);

    if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
        DRM_DEBUG_KMS("vblank wait timed out\n");
        WARN(1, "vblank wait timed out\n");
}

/**
@@ -1804,16 +1804,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc)

    I915_WRITE(reg, val | PIPECONF_ENABLE);
    POSTING_READ(reg);

    /*
     * There's no guarantee the pipe will really start running now. It
     * depends on the Gen, the output type and the relative order between
     * pipe and plane enabling. Avoid waiting on HSW+ since it's not
     * necessary.
     * TODO: audit the previous gens.
     */
    if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
        intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
@@ -2166,15 +2156,6 @@ static int i9xx_update_primary_plane(struct drm_crtc *crtc,
    u32 dspcntr;
    u32 reg;

    switch (plane) {
    case 0:
    case 1:
        break;
    default:
        DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

@@ -2267,16 +2248,6 @@ static int ironlake_update_primary_plane(struct drm_crtc *crtc,
    u32 dspcntr;
    u32 reg;

    switch (plane) {
    case 0:
    case 1:
    case 2:
        break;
    default:
        DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
        return -EINVAL;
    }

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

@@ -3602,10 +3573,13 @@ void hsw_disable_ips(struct intel_crtc *crtc)
        return;

    assert_plane_enabled(dev_priv, crtc->plane);
    if (IS_BROADWELL(crtc->base.dev)) {
    if (IS_BROADWELL(dev)) {
        mutex_lock(&dev_priv->rps.hw_lock);
        WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
        mutex_unlock(&dev_priv->rps.hw_lock);
        /* wait for pcode to finish disabling IPS, which may take up to 42ms */
        if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
            DRM_ERROR("Timed out waiting for IPS disable\n");
    } else {
        I915_WRITE(IPS_CTL, 0);
        POSTING_READ(IPS_CTL);
@@ -3662,6 +3636,46 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
    hsw_enable_ips(intel_crtc);
}

static void ilk_crtc_enable_planes(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    intel_enable_primary_hw_plane(dev_priv, plane, pipe);
    intel_enable_planes(crtc);
    intel_crtc_update_cursor(crtc, true);

    hsw_enable_ips(intel_crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);
}

static void ilk_crtc_disable_planes(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);

    if (dev_priv->fbc.plane == plane)
        intel_disable_fbc(dev);

    hsw_disable_ips(intel_crtc);

    intel_crtc_update_cursor(crtc, false);
    intel_disable_planes(crtc);
    intel_disable_primary_hw_plane(dev_priv, plane, pipe);
}

static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
@@ -3669,7 +3683,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    WARN_ON(!crtc->enabled);

@@ -3705,23 +3718,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)

    intel_update_watermarks(crtc);
    intel_enable_pipe(intel_crtc);
    intel_enable_primary_hw_plane(dev_priv, plane, pipe);
    intel_enable_planes(crtc);
    intel_crtc_update_cursor(crtc, true);

    if (intel_crtc->config.has_pch_encoder)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->enable(encoder);

    if (HAS_PCH_CPT(dev))
        cpt_verify_modeset(dev, intel_crtc->pipe);

    ilk_crtc_enable_planes(crtc);

    /*
     * There seems to be a race in PCH platform hw (at least on some
     * outputs) where an enabled pipe still completes any pageflip right
@@ -3739,47 +3747,6 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
    return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    intel_enable_primary_hw_plane(dev_priv, plane, pipe);
    intel_enable_planes(crtc);
    intel_crtc_update_cursor(crtc, true);

    hsw_enable_ips(intel_crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);
}

static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);

    /* FBC must be disabled before disabling the plane on HSW. */
    if (dev_priv->fbc.plane == plane)
        intel_disable_fbc(dev);

    hsw_disable_ips(intel_crtc);

    intel_crtc_update_cursor(crtc, false);
    intel_disable_planes(crtc);
    intel_disable_primary_hw_plane(dev_priv, plane, pipe);
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
@@ -3862,7 +3829,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
    /* If we change the relative order between pipe/planes enabling, we need
     * to change the workaround. */
    haswell_mode_set_planes_workaround(intel_crtc);
    haswell_crtc_enable_planes(crtc);
    ilk_crtc_enable_planes(crtc);
}

static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -3887,26 +3854,16 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp;

    if (!intel_crtc->active)
        return;

    ilk_crtc_disable_planes(crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->disable(encoder);

    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);

    if (dev_priv->fbc.plane == plane)
        intel_disable_fbc(dev);

    intel_crtc_update_cursor(crtc, false);
    intel_disable_planes(crtc);
    intel_disable_primary_hw_plane(dev_priv, plane, pipe);

    if (intel_crtc->config.has_pch_encoder)
        intel_set_pch_fifo_underrun_reporting(dev, pipe, false);

@@ -3965,7 +3922,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
    if (!intel_crtc->active)
        return;

    haswell_crtc_disable_planes(crtc);
    ilk_crtc_disable_planes(crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder) {
        intel_opregion_notify_encoder(encoder, false);
@@ -4207,6 +4164,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 val, cmd;

    WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
    dev_priv->vlv_cdclk_freq = cdclk;

    if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
        cmd = 2;
    else if (cdclk == 266)
@@ -4261,7 +4221,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
    intel_i2c_reset(dev);
}

static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
{
    int cur_cdclk, vco;
    int divider;
@@ -4282,10 +4242,6 @@ static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
                                 int max_pixclk)
{
    int cur_cdclk;

    cur_cdclk = valleyview_cur_cdclk(dev_priv);

    /*
     * Really only a few cases to deal with, as only 4 CDclks are supported:
     * 200MHz
@@ -4327,9 +4283,9 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev,
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc;
    int max_pixclk = intel_mode_max_pixclk(dev_priv);
    int cur_cdclk = valleyview_cur_cdclk(dev_priv);

    if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
    if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
        dev_priv->vlv_cdclk_freq)
        return;

    /* disable/enable all currently active pipes while we change cdclk */
@@ -4343,10 +4299,9 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int max_pixclk = intel_mode_max_pixclk(dev_priv);
    int cur_cdclk = valleyview_cur_cdclk(dev_priv);
    int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

    if (req_cdclk != cur_cdclk)
    if (req_cdclk != dev_priv->vlv_cdclk_freq)
        valleyview_set_cdclk(dev, req_cdclk);
    modeset_update_crtc_power_domains(dev);
}
@@ -4387,7 +4342,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)

    intel_update_watermarks(crtc);
    intel_enable_pipe(intel_crtc);
    intel_wait_for_vblank(dev_priv->dev, pipe);
    intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

    intel_enable_primary_hw_plane(dev_priv, plane, pipe);
    intel_enable_planes(crtc);
    intel_crtc_update_cursor(crtc, true);
@@ -4426,7 +4383,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)

    intel_update_watermarks(crtc);
    intel_enable_pipe(intel_crtc);
    intel_wait_for_vblank(dev_priv->dev, pipe);
    intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

    intel_enable_primary_hw_plane(dev_priv, plane, pipe);
    intel_enable_planes(crtc);
    /* The fixup needs to happen before cursor is enabled */
@@ -5245,9 +5204,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
    crtc->config.dpll_hw_state.dpll_md = dpll_md;

    if (crtc->config.has_dp_encoder)
        intel_dp_set_m_n(crtc);

    mutex_unlock(&dev_priv->dpio_lock);
}

@@ -5325,9 +5281,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
            << DPLL_MD_UDI_MULTIPLIER_SHIFT;
        crtc->config.dpll_hw_state.dpll_md = dpll_md;
    }

    if (crtc->config.has_dp_encoder)
        intel_dp_set_m_n(crtc);
}

static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -5656,6 +5609,9 @@ skip_dpll:
        dspcntr |= DISPPLANE_SEL_PIPE_B;
    }

    if (intel_crtc->config.has_dp_encoder)
        intel_dp_set_m_n(intel_crtc);

    intel_set_pipe_timings(intel_crtc);

    /* pipesrc and dspsize control the size that is scaled from,
@@ -6880,8 +6836,6 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
    struct drm_device *dev = dev_priv->dev;
    struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
    struct intel_crtc *crtc;
    unsigned long irqflags;
    uint32_t val;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
        WARN(crtc->active, "CRTC for pipe %c enabled\n",
@@ -6902,14 +6856,29 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
         "Utility pin enabled\n");
    WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    val = I915_READ(DEIMR);
    WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
         "Unexpected DEIMR bits enabled: 0x%x\n", val);
    val = I915_READ(SDEIMR);
    WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
         "Unexpected SDEIMR bits enabled: 0x%x\n", val);
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    /*
     * In theory we can still leave IRQs enabled, as long as only the HPD
     * interrupts remain enabled. We used to check for that, but since it's
     * gen-specific and since we only disable LCPLL after we fully disable
     * the interrupts, the check below should be enough.
     */
    WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
    struct drm_device *dev = dev_priv->dev;

    if (IS_HASWELL(dev)) {
        mutex_lock(&dev_priv->rps.hw_lock);
        if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
                                    val))
            DRM_ERROR("Failed to disable D_COMP\n");
        mutex_unlock(&dev_priv->rps.hw_lock);
    } else {
        I915_WRITE(D_COMP, val);
    }
    POSTING_READ(D_COMP);
}

/*
@@ -6949,11 +6918,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,

    val = I915_READ(D_COMP);
    val |= D_COMP_COMP_DISABLE;
    mutex_lock(&dev_priv->rps.hw_lock);
    if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
        DRM_ERROR("Failed to disable D_COMP\n");
    mutex_unlock(&dev_priv->rps.hw_lock);
    POSTING_READ(D_COMP);
    hsw_write_dcomp(dev_priv, val);
    ndelay(100);

    if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
@@ -7008,11 +6973,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
    val = I915_READ(D_COMP);
    val |= D_COMP_COMP_FORCE;
    val &= ~D_COMP_COMP_DISABLE;
    mutex_lock(&dev_priv->rps.hw_lock);
    if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
        DRM_ERROR("Failed to enable D_COMP\n");
    mutex_unlock(&dev_priv->rps.hw_lock);
    POSTING_READ(D_COMP);
    hsw_write_dcomp(dev_priv, val);

    val = I915_READ(LCPLL_CTL);
    val &= ~LCPLL_PLL_DISABLE;
@@ -7066,8 +7027,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
    struct drm_device *dev = dev_priv->dev;
    uint32_t val;

    WARN_ON(!HAS_PC8(dev));

    DRM_DEBUG_KMS("Enabling package C8+\n");

    if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7077,7 +7036,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
    }

    lpt_disable_clkout_dp(dev);
    hsw_runtime_pm_disable_interrupts(dev);
    intel_runtime_pm_disable_interrupts(dev);
    hsw_disable_lcpll(dev_priv, true, true);
}

@@ -7086,12 +7045,10 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
    struct drm_device *dev = dev_priv->dev;
    uint32_t val;

    WARN_ON(!HAS_PC8(dev));

    DRM_DEBUG_KMS("Disabling package C8+\n");

    hsw_restore_lcpll(dev_priv);
    hsw_runtime_pm_restore_interrupts(dev);
    intel_runtime_pm_restore_interrupts(dev);
    lpt_init_pch_refclk(dev);

    if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
@@ -7107,6 +7064,11 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
    mutex_unlock(&dev_priv->rps.hw_lock);
}

static void snb_modeset_global_resources(struct drm_device *dev)
{
    modeset_update_crtc_power_domains(dev);
}

static void haswell_modeset_global_resources(struct drm_device *dev)
{
    modeset_update_crtc_power_domains(dev);
@@ -7374,7 +7336,6 @@ static void haswell_write_eld(struct drm_connector *connector,
{
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    uint8_t *eld = connector->eld;
    struct drm_device *dev = crtc->dev;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    uint32_t eldv;
    uint32_t i;
@@ -7387,17 +7348,14 @@ static void haswell_write_eld(struct drm_connector *connector,
    int aud_config = HSW_AUD_CFG(pipe);
    int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

    DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");

    /* Audio output enable */
    DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
    tmp = I915_READ(aud_cntrl_st2);
    tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
    I915_WRITE(aud_cntrl_st2, tmp);
    POSTING_READ(aud_cntrl_st2);

    /* Wait for 1 vertical blank */
    intel_wait_for_vblank(dev, pipe);
    assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);

    /* Set ELD valid state */
    tmp = I915_READ(aud_cntrl_st2);
@@ -8836,8 +8794,16 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
    }

    len = 4;
    if (ring->id == RCS)
    if (ring->id == RCS) {
        len += 6;
        /*
         * On Gen 8, SRM is now taking an extra dword to accommodate
         * 48bits addresses, and we need a NOOP for the batch size to
         * stay even.
         */
        if (IS_GEN8(dev))
            len += 2;
    }

    /*
     * BSpec MI_DISPLAY_FLIP for IVB:
@@ -8872,10 +8838,18 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                DERRMR_PIPEB_PRI_FLIP_DONE |
                                DERRMR_PIPEC_PRI_FLIP_DONE));
        intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
                              MI_SRM_LRM_GLOBAL_GTT);
        if (IS_GEN8(dev))
            intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
                                  MI_SRM_LRM_GLOBAL_GTT);
        else
            intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
                                  MI_SRM_LRM_GLOBAL_GTT);
        intel_ring_emit(ring, DERRMR);
        intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
        if (IS_GEN8(dev)) {
            intel_ring_emit(ring, 0);
            intel_ring_emit(ring, MI_NOOP);
        }
    }

    intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
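A note on the dword accounting in the two hunks above (a sketch of the arithmetic, not a BSpec quotation):

/*
 * RCS flips wrap MI_DISPLAY_FLIP (4 dwords) with a DERRMR LRI (3 dwords)
 * and a DERRMR SRM (3 dwords), hence len = 4 + 6 on gen7. On gen8 the SRM
 * grows by one dword for the upper 32 bits of the 48-bit address (emitted
 * as 0 above) and one MI_NOOP pads the total back to an even dword count,
 * hence len = 4 + 6 + 2.
 */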
@@ -10578,16 +10552,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)

    drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

    if (IS_GEN2(dev)) {
        intel_crtc->max_cursor_width = GEN2_CURSOR_WIDTH;
        intel_crtc->max_cursor_height = GEN2_CURSOR_HEIGHT;
    } else {
        intel_crtc->max_cursor_width = CURSOR_WIDTH;
        intel_crtc->max_cursor_height = CURSOR_HEIGHT;
    }
    dev->mode_config.cursor_width = intel_crtc->max_cursor_width;
    dev->mode_config.cursor_height = intel_crtc->max_cursor_height;

    drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
    for (i = 0; i < 256; i++) {
        intel_crtc->lut_r[i] = i;
@@ -11088,6 +11052,8 @@ static void intel_init_display(struct drm_device *dev)
    } else if (IS_GEN6(dev)) {
        dev_priv->display.fdi_link_train = gen6_fdi_link_train;
        dev_priv->display.write_eld = ironlake_write_eld;
        dev_priv->display.modeset_global_resources =
            snb_modeset_global_resources;
    } else if (IS_IVYBRIDGE(dev)) {
        /* FIXME: detect B0+ stepping and use auto training */
        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
@@ -11338,6 +11304,15 @@ void intel_modeset_init(struct drm_device *dev)
        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;
    }

    if (IS_GEN2(dev)) {
        dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
        dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
    } else {
        dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
        dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
    }

    dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

    DRM_DEBUG_KMS("%d display pipe%s available.\n",
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -738,6 +738,20 @@ intel_dp_set_clock(struct intel_encoder *encoder,
    }
}

static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    enum transcoder transcoder = crtc->config.cpu_transcoder;

    I915_WRITE(PIPE_DATA_M2(transcoder),
               TU_SIZE(m_n->tu) | m_n->gmch_m);
    I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
    I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
    I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
@@ -842,6 +856,14 @@ found:
                           pipe_config->port_clock,
                           &pipe_config->dp_m_n);

    if (intel_connector->panel.downclock_mode != NULL &&
        intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
            intel_link_compute_m_n(bpp, lane_count,
                intel_connector->panel.downclock_mode->clock,
                pipe_config->port_clock,
                &pipe_config->dp_m2_n2);
    }

    intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

    return true;
@@ -1044,7 +1066,10 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
    struct drm_device *dev = intel_dp_to_dev(intel_dp);
    struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
    struct intel_encoder *intel_encoder = &intel_dig_port->base;
    struct drm_i915_private *dev_priv = dev->dev_private;
    enum intel_display_power_domain power_domain;
    u32 pp;
    u32 pp_stat_reg, pp_ctrl_reg;
    bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -1057,7 +1082,8 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
    if (edp_have_panel_vdd(intel_dp))
        return need_to_disable;

    intel_runtime_pm_get(dev_priv);
    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);

    DRM_DEBUG_KMS("Turning eDP VDD on\n");

@@ -1104,6 +1130,11 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
    WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

    if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
        struct intel_digital_port *intel_dig_port =
                        dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;

        DRM_DEBUG_KMS("Turning eDP VDD off\n");

        pp = ironlake_get_pp_control(intel_dp);
@@ -1122,7 +1153,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
        if ((pp & POWER_TARGET_ON) == 0)
            intel_dp->last_power_cycle = jiffies;

        intel_runtime_pm_put(dev_priv);
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
    }
}

@@ -1206,8 +1238,11 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
    struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
    struct intel_encoder *intel_encoder = &intel_dig_port->base;
    struct drm_device *dev = intel_dp_to_dev(intel_dp);
    struct drm_i915_private *dev_priv = dev->dev_private;
    enum intel_display_power_domain power_domain;
    u32 pp;
    u32 pp_ctrl_reg;

@@ -1237,7 +1272,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
    wait_panel_off(intel_dp);

    /* We got a reference when we enabled the VDD. */
    intel_runtime_pm_put(dev_priv);
    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_put(dev_priv, power_domain);
}

void intel_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1778,17 +1814,23 @@ static void intel_disable_dp(struct intel_encoder *encoder)
    intel_dp_link_down(intel_dp);
}

static void intel_post_disable_dp(struct intel_encoder *encoder)
static void g4x_post_disable_dp(struct intel_encoder *encoder)
{
    struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
    enum port port = dp_to_dig_port(intel_dp)->port;
    struct drm_device *dev = encoder->base.dev;

    if (port == PORT_A || IS_VALLEYVIEW(dev)) {
        intel_dp_link_down(intel_dp);
        if (!IS_VALLEYVIEW(dev))
            ironlake_edp_pll_off(intel_dp);
    }
    if (port != PORT_A)
        return;

    intel_dp_link_down(intel_dp);
    ironlake_edp_pll_off(intel_dp);
}

static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
    struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

    intel_dp_link_down(intel_dp);
}

static void intel_enable_dp(struct intel_encoder *encoder)
@@ -3613,6 +3655,130 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
              I915_READ(pp_div_reg));
}

void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_encoder *encoder;
    struct intel_dp *intel_dp = NULL;
    struct intel_crtc_config *config = NULL;
    struct intel_crtc *intel_crtc = NULL;
    struct intel_connector *intel_connector = dev_priv->drrs.connector;
    u32 reg, val;
    enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;

    if (refresh_rate <= 0) {
        DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
        return;
    }

    if (intel_connector == NULL) {
        DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
        return;
    }

    if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
        DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
        return;
    }

    encoder = intel_attached_encoder(&intel_connector->base);
    intel_dp = enc_to_intel_dp(&encoder->base);
    intel_crtc = encoder->new_crtc;

    if (!intel_crtc) {
        DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
        return;
    }

    config = &intel_crtc->config;

    if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
        DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
        return;
    }

    if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
        index = DRRS_LOW_RR;

    if (index == intel_dp->drrs_state.refresh_rate_type) {
        DRM_DEBUG_KMS(
            "DRRS requested for previously set RR...ignoring\n");
        return;
    }

    if (!intel_crtc->active) {
        DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
        return;
    }

    if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
        reg = PIPECONF(intel_crtc->config.cpu_transcoder);
        val = I915_READ(reg);
        if (index > DRRS_HIGH_RR) {
            val |= PIPECONF_EDP_RR_MODE_SWITCH;
            intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
        } else {
            val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
        }
        I915_WRITE(reg, val);
    }

    /*
     * mutex taken to ensure that there is no race between different
     * drrs calls trying to update refresh rate. This scenario may occur
     * in future when idleness detection based DRRS in kernel and
     * possible calls from user space to set different RR are made.
     */

    mutex_lock(&intel_dp->drrs_state.mutex);

    intel_dp->drrs_state.refresh_rate_type = index;

    mutex_unlock(&intel_dp->drrs_state.mutex);

    DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}

static struct drm_display_mode *
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
                   struct intel_connector *intel_connector,
                   struct drm_display_mode *fixed_mode)
{
    struct drm_connector *connector = &intel_connector->base;
    struct intel_dp *intel_dp = &intel_dig_port->dp;
    struct drm_device *dev = intel_dig_port->base.base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_display_mode *downclock_mode = NULL;

    if (INTEL_INFO(dev)->gen <= 6) {
        DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
        return NULL;
    }

    if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
        DRM_INFO("VBT doesn't support DRRS\n");
        return NULL;
    }

    downclock_mode = intel_find_panel_downclock
                        (dev, fixed_mode, connector);

    if (!downclock_mode) {
        DRM_INFO("DRRS not supported\n");
        return NULL;
    }

    dev_priv->drrs.connector = intel_connector;

    mutex_init(&intel_dp->drrs_state.mutex);

    intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;

    intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
    DRM_INFO("seamless DRRS supported for eDP panel.\n");
    return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector,
                                     struct edp_power_seq *power_seq)
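To make the refresh-rate bookkeeping in intel_dp_set_drrs_state() above concrete, a standalone sketch of the classification step (the enum names mirror the new intel_drv.h definitions; everything else is illustrative). A future idleness detector would effectively call intel_dp_set_drrs_state(dev, downclock_mode->vrefresh) to drop to the low rate:

#include <stdio.h>

enum edp_drrs_refresh_rate_type { DRRS_HIGH_RR, DRRS_LOW_RR };

/* A requested rate matching the panel's downclock mode selects the low-RR
 * M2/N2 timings; anything else stays on (or returns to) the high-RR set. */
static enum edp_drrs_refresh_rate_type
classify_rr(int refresh_rate, int downclock_vrefresh)
{
    return refresh_rate == downclock_vrefresh ? DRRS_LOW_RR : DRRS_HIGH_RR;
}

int main(void)
{
    printf("%d\n", classify_rr(40, 40)); /* 1 -> DRRS_LOW_RR */
    printf("%d\n", classify_rr(60, 40)); /* 0 -> DRRS_HIGH_RR */
    return 0;
}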
@@ -3623,10 +3789,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
    struct drm_device *dev = intel_encoder->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_display_mode *fixed_mode = NULL;
    struct drm_display_mode *downclock_mode = NULL;
    bool has_dpcd;
    struct drm_display_mode *scan;
    struct edid *edid;

    intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;

    if (!is_edp(intel_dp))
        return true;

@@ -3677,6 +3846,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
    list_for_each_entry(scan, &connector->probed_modes, head) {
        if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
            fixed_mode = drm_mode_duplicate(dev, scan);
            downclock_mode = intel_dp_drrs_init(
                        intel_dig_port,
                        intel_connector, fixed_mode);
            break;
        }
    }
@@ -3690,7 +3862,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
    }
    mutex_unlock(&dev->mode_config.mutex);

    intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
    intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
    intel_panel_setup_backlight(connector);

    return true;
@@ -3841,16 +4013,17 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
    intel_encoder->compute_config = intel_dp_compute_config;
    intel_encoder->mode_set = intel_dp_mode_set;
    intel_encoder->disable = intel_disable_dp;
    intel_encoder->post_disable = intel_post_disable_dp;
    intel_encoder->get_hw_state = intel_dp_get_hw_state;
    intel_encoder->get_config = intel_dp_get_config;
    if (IS_VALLEYVIEW(dev)) {
        intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
        intel_encoder->pre_enable = vlv_pre_enable_dp;
        intel_encoder->enable = vlv_enable_dp;
        intel_encoder->post_disable = vlv_post_disable_dp;
    } else {
        intel_encoder->pre_enable = g4x_pre_enable_dp;
        intel_encoder->enable = g4x_enable_dp;
        intel_encoder->post_disable = g4x_post_disable_dp;
    }

    intel_dig_port->port = port;
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -81,8 +81,8 @@
/* Maximum cursor sizes */
#define GEN2_CURSOR_WIDTH 64
#define GEN2_CURSOR_HEIGHT 64
#define CURSOR_WIDTH 256
#define CURSOR_HEIGHT 256
#define MAX_CURSOR_WIDTH 256
#define MAX_CURSOR_HEIGHT 256

#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
@@ -306,6 +306,9 @@ struct intel_crtc_config {
    int pipe_bpp;
    struct intel_link_m_n dp_m_n;

    /* m2_n2 for eDP downclock */
    struct intel_link_m_n dp_m2_n2;

    /*
     * Frequency the dpll for the port should run at. Differs from the
     * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -343,6 +346,9 @@ struct intel_pipe_wm {
    struct intel_wm_level wm[5];
    uint32_t linetime;
    bool fbc_wm_enabled;
    bool pipe_enabled;
    bool sprites_enabled;
    bool sprites_scaled;
};

struct intel_crtc {
@@ -374,7 +380,6 @@ struct intel_crtc {
    uint32_t cursor_addr;
    int16_t cursor_x, cursor_y;
    int16_t cursor_width, cursor_height;
    int16_t max_cursor_width, max_cursor_height;
    bool cursor_visible;

    struct intel_plane_config plane_config;
@@ -484,6 +489,17 @@ struct intel_hdmi {

#define DP_MAX_DOWNSTREAM_PORTS 0x10

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum edp_drrs_refresh_rate_type {
    DRRS_HIGH_RR,
    DRRS_LOW_RR,
    DRRS_MAX_RR, /* RR count */
};

struct intel_dp {
    uint32_t output_reg;
    uint32_t aux_ch_ctl_reg;
@@ -522,6 +538,12 @@ struct intel_dp {
                 bool has_aux_irq,
                 int send_bytes,
                 uint32_t aux_clock_divider);
    struct {
        enum drrs_support_type type;
        enum edp_drrs_refresh_rate_type refresh_rate_type;
        struct mutex mutex;
    } drrs_state;

};

struct intel_digital_port {
@@ -629,8 +651,8 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void hsw_runtime_pm_disable_interrupts(struct drm_device *dev);
void hsw_runtime_pm_restore_interrupts(struct drm_device *dev);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);

/* intel_crt.c */
@@ -666,6 +688,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
                        struct intel_ring_buffer *ring);
@@ -774,7 +797,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);

void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);

/* intel_dsi.c */
bool intel_dsi_init(struct drm_device *dev);
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -110,6 +110,15 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)

    DRM_DEBUG_KMS("\n");

    mutex_lock(&dev_priv->dpio_lock);
    /* program rcomp for compliance, reduce from 50 ohms to 45 ohms
     * needed every time after power gate */
    vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
    mutex_unlock(&dev_priv->dpio_lock);

    /* bandgap reset is needed every time we do power gate */
    band_gap_reset(dev_priv);

    val = I915_READ(MIPI_PORT_CTRL(pipe));
    I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
    usleep_range(1000, 1500);
@@ -122,21 +131,6 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
    I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
    usleep_range(2000, 2500);
}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
    struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);

    DRM_DEBUG_KMS("\n");

    if (intel_dsi->dev.dev_ops->panel_reset)
        intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);

    /* put device in ready state */
    intel_dsi_device_ready(encoder);

    if (intel_dsi->dev.dev_ops->send_otp_cmds)
        intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
}

static void intel_dsi_enable(struct intel_encoder *encoder)
{
@@ -153,18 +147,63 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
        I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
    else {
        msleep(20); /* XXX */
        dpi_send_cmd(intel_dsi, TURN_ON);
        dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
        msleep(100);

        if (intel_dsi->dev.dev_ops->enable)
            intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);

        /* assert ip_tg_enable signal */
        temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
        temp = temp | intel_dsi->port_bits;
        I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
        POSTING_READ(MIPI_PORT_CTRL(pipe));
    }
}

    if (intel_dsi->dev.dev_ops->enable)
        intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
    struct drm_device *dev = encoder->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    enum pipe pipe = intel_crtc->pipe;
    u32 tmp;

    DRM_DEBUG_KMS("\n");

    /* Disable DPOunit clock gating, can stall pipe
     * and we need DPLL REFA always enabled */
    tmp = I915_READ(DPLL(pipe));
    tmp |= DPLL_REFA_CLK_ENABLE_VLV;
    I915_WRITE(DPLL(pipe), tmp);

    tmp = I915_READ(DSPCLK_GATE_D);
    tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
    I915_WRITE(DSPCLK_GATE_D, tmp);

    /* put device in ready state */
    intel_dsi_device_ready(encoder);

    if (intel_dsi->dev.dev_ops->panel_reset)
        intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);

    if (intel_dsi->dev.dev_ops->send_otp_cmds)
        intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);

    /* Enable port in pre-enable phase itself because as per hw team
     * recommendation, port should be enabled before plane & pipe */
    intel_dsi_enable(encoder);
}

static void intel_dsi_enable_nop(struct intel_encoder *encoder)
{
    DRM_DEBUG_KMS("\n");

    /* for DSI port enable has to be done before pipe
     * and plane enable, so port enable is done in
     * pre_enable phase itself unlike other encoders
     */
}

static void intel_dsi_disable(struct intel_encoder *encoder)
@@ -179,7 +218,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
    DRM_DEBUG_KMS("\n");

    if (is_vid_mode(intel_dsi)) {
        dpi_send_cmd(intel_dsi, SHUTDOWN);
        /* Send Shutdown command to the panel in LP mode */
        dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
        msleep(10);

        /* de-assert ip_tg_enable signal */
@@ -190,6 +230,23 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
        msleep(2);
    }

    /* Panel commands can be sent when clock is in LP11 */
    I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);

    temp = I915_READ(MIPI_CTRL(pipe));
    temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
    I915_WRITE(MIPI_CTRL(pipe), temp |
               intel_dsi->escape_clk_div <<
               ESCAPE_CLOCK_DIVIDER_SHIFT);

    I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);

    temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
    temp &= ~VID_MODE_FORMAT_MASK;
    I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);

    I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);

    /* if disable packets are sent before sending shutdown packet then in
     * some next enable sequence send turn on packet error is observed */
    if (intel_dsi->dev.dev_ops->disable)
@@ -227,14 +284,21 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)

    vlv_disable_dsi_pll(encoder);
}

static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
    u32 val;

    DRM_DEBUG_KMS("\n");

    intel_dsi_clear_device_ready(encoder);

    val = I915_READ(DSPCLK_GATE_D);
    val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
    I915_WRITE(DSPCLK_GATE_D, val);

    if (intel_dsi->dev.dev_ops->disable_panel_power)
        intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
}
@@ -379,9 +443,6 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)

    DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));

    /* XXX: Location of the call */
    band_gap_reset(dev_priv);

    /* escape clock divider, 20MHz, shared for A and C. device ready must be
     * off when doing this! txclkesc? */
    tmp = I915_READ(MIPI_CTRL(0));
@@ -452,10 +513,17 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
    /* dphy stuff */

    /* in terms of low power clock */
    I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
    I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));

    val = 0;
    if (intel_dsi->eotp_pkt == 0)
        val |= EOT_DISABLE;

    if (intel_dsi->clock_stop)
        val |= CLOCKSTOP;

    /* recovery disables */
    I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
    I915_WRITE(MIPI_EOT_DISABLE(pipe), val);

    /* in terms of txbyteclkhs. actual high to low switch +
     * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
@@ -484,9 +552,14 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
        intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);

    if (is_vid_mode(intel_dsi))
        /* Some panels might have resolution which is not a multiple of
         * 64 like 1366 x 768. Enable RANDOM resolution support for such
         * panels by default */
        I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
                   intel_dsi->video_frmt_cfg_bits |
                   intel_dsi->video_mode_format);
                   intel_dsi->video_mode_format |
                   IP_TG_CONFIG |
                   RANDOM_DPI_DISPLAY_RESOLUTION);
}

static enum drm_connector_status
@@ -594,7 +667,7 @@ bool intel_dsi_init(struct drm_device *dev)
    intel_encoder->compute_config = intel_dsi_compute_config;
    intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
    intel_encoder->pre_enable = intel_dsi_pre_enable;
    intel_encoder->enable = intel_dsi_enable;
    intel_encoder->enable = intel_dsi_enable_nop;
    intel_encoder->mode_set = intel_dsi_mode_set;
    intel_encoder->disable = intel_dsi_disable;
    intel_encoder->post_disable = intel_dsi_post_disable;
|
|
|
@@ -95,8 +95,10 @@ struct intel_dsi {
 	u32 video_mode_format;
 
 	/* eot for MIPI_EOT_DISABLE register */
-	u32 eot_disable;
+	u8 eotp_pkt;
+	u8 clock_stop;
 
+	u8 escape_clk_div;
 	u32 port_bits;
 	u32 bw_timer;
 	u32 dphy_reg;
@@ -389,7 +389,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
  *
  * XXX: commands with data in MIPI_DPI_DATA?
  */
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
 {
 	struct drm_encoder *encoder = &intel_dsi->base.base;
 	struct drm_device *dev = encoder->dev;

@@ -399,7 +399,7 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
 	u32 mask;
 
 	/* XXX: pipe, hs */
-	if (intel_dsi->hs)
+	if (hs)
 		cmd &= ~DPI_LP_MODE;
 	else
 		cmd |= DPI_LP_MODE;

@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include "intel_dsi.h"
 
+#define DPI_LP_MODE_EN false
+#define DPI_HS_MODE_EN true
+
 void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
 
 int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,

@@ -47,7 +50,7 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
 int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
 			u8 *reqdata, int reqlen, u8 *buf, int buflen);
 
-int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
 
 /* XXX: questionable write helpers */
 static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
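Passing hs explicitly lets each call site pick low-power or high-speed signalling per command, rather than relying on a global intel_dsi->hs field. A hedged userspace sketch of the flag handling; the DPI_LP_MODE bit value is an assumption made for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DPI_LP_MODE (1u << 6)	/* assumed bit position, illustration only */
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true

/* Mirrors the reworked dpi_send_cmd(): the transfer mode is now an
 * explicit argument instead of encoder-global state. */
static uint32_t prep_dpi_cmd(uint32_t cmd, bool hs)
{
	if (hs)
		cmd &= ~DPI_LP_MODE;
	else
		cmd |= DPI_LP_MODE;
	return cmd;
}

int main(void)
{
	printf("HS: 0x%08x\n", (unsigned)prep_dpi_cmd(0x2, DPI_HS_MODE_EN));
	printf("LP: 0x%08x\n", (unsigned)prep_dpi_cmd(0x2, DPI_LP_MODE_EN));
	return 0;
}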
@@ -557,10 +557,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
+	u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);

@@ -576,9 +578,19 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 		return;
 	}
 
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		if (val & VIDEO_DIP_ENABLE) {
+			val &= ~VIDEO_DIP_ENABLE;
+			I915_WRITE(reg, val);
+			POSTING_READ(reg);
+		}
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
+		 VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
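The new port-switch block encodes an ordering rule: the DIP block must be disabled, and that write posted, before its port selection may change. A small compilable model of the same state transition; the mask widths are assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define VIDEO_DIP_ENABLE    (1u << 31)
#define VIDEO_DIP_PORT_MASK (3u << 29)	/* assumed field width, illustration */

/* Disable first (in the driver: I915_WRITE + POSTING_READ), then
 * retarget, then re-enable. */
static uint32_t switch_dip_port(uint32_t val, uint32_t port)
{
	if (port != (val & VIDEO_DIP_PORT_MASK)) {
		if (val & VIDEO_DIP_ENABLE) {
			val &= ~VIDEO_DIP_ENABLE;
			/* posted write would land here */
		}
		val &= ~VIDEO_DIP_PORT_MASK;
		val |= port;
	}
	return val | VIDEO_DIP_ENABLE;
}

int main(void)
{
	uint32_t val = VIDEO_DIP_ENABLE | (1u << 29);
	printf("0x%08x\n", (unsigned)switch_dip_port(val, 2u << 29));
	return 0;
}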
@@ -638,8 +650,8 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 	else
 		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
 
-	/* Required on CPT */
-	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+	if (intel_hdmi->has_hdmi_sink &&
+	    (HAS_PCH_CPT(dev) || IS_VALLEYVIEW(dev)))
 		hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
 	if (intel_hdmi->has_audio) {

@@ -657,8 +669,6 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 
 	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
 	POSTING_READ(intel_hdmi->hdmi_reg);
-
-	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
 }
 
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,

@@ -1104,13 +1114,26 @@ done:
 	return 0;
 }
 
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+
+	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+}
+
 static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_hdmi *intel_hdmi = &dport->hdmi;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	u32 val;

@@ -1144,6 +1167,8 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+
 	intel_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport);

@@ -1339,6 +1364,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 		intel_encoder->enable = vlv_enable_hdmi;
 		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
+		intel_encoder->pre_enable = intel_hdmi_pre_enable;
 		intel_encoder->enable = intel_enable_hdmi;
 	}
@@ -111,13 +111,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
 	pipe_config->adjusted_mode.flags |= flags;
 
-	/* gen2/3 store dither state in pfit control, needs to match */
-	if (INTEL_INFO(dev)->gen < 4) {
-		tmp = I915_READ(PFIT_CONTROL);
-
-		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
-	}
-
 	dotclock = pipe_config->port_clock;
 
 	if (HAS_PCH_SPLIT(dev_priv->dev))

@@ -308,16 +308,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
 				 PFIT_FILTER_FUZZY);
 
-	/* Make sure pre-965 set dither correctly for 18bpp panels. */
-	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
 out:
 	if ((pfit_control & PFIT_ENABLE) == 0) {
 		pfit_control = 0;
 		pfit_pgm_ratios = 0;
 	}
 
+	/* Make sure pre-965 set dither correctly for 18bpp panels. */
+	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 	pipe_config->gmch_pfit.control = pfit_control;
 	pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
 	pipe_config->gmch_pfit.lvds_border_bits = border;
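Moving the dither setup below the out: label fixes an ordering hazard: when the scaler ends up disabled, pfit_control is zeroed, which previously wiped a dither bit that had been set earlier, so pre-965 18bpp panels lost dithering. A compilable sketch of the corrected ordering; the two bit positions are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PFIT_ENABLE              (1u << 31)
#define PANEL_8TO6_DITHER_ENABLE (1u << 3)	/* assumed bit, illustration */

/* Zero the control word for the scaler-off case first, then apply the
 * dither bit, so the bit survives regardless of scaler state. */
static uint32_t finish_pfit(uint32_t pfit_control, int gen, int bpp)
{
	if ((pfit_control & PFIT_ENABLE) == 0)
		pfit_control = 0;
	if (gen < 4 && bpp == 18)
		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
	return pfit_control;
}

int main(void)
{
	/* scaler off, gen3, 18bpp panel: dither must still be set */
	printf("0x%08x\n", (unsigned)finish_pfit(0, 3, 18));
	return 0;
}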
@@ -1831,6 +1831,40 @@ static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 	return 512;
 }
 
+static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
+					 int level, bool is_sprite)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		/* BDW primary/sprite plane watermarks */
+		return level == 0 ? 255 : 2047;
+	else if (INTEL_INFO(dev)->gen >= 7)
+		/* IVB/HSW primary/sprite plane watermarks */
+		return level == 0 ? 127 : 1023;
+	else if (!is_sprite)
+		/* ILK/SNB primary plane watermarks */
+		return level == 0 ? 127 : 511;
+	else
+		/* ILK/SNB sprite plane watermarks */
+		return level == 0 ? 63 : 255;
+}
+
+static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
+					  int level)
+{
+	if (INTEL_INFO(dev)->gen >= 7)
+		return level == 0 ? 63 : 255;
+	else
+		return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen >= 8)
+		return 31;
+	else
+		return 15;
+}
+
 /* Calculate the maximum primary/sprite plane watermark */
 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 				     int level,

@@ -1839,7 +1873,6 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 				     bool is_sprite)
 {
 	unsigned int fifo_size = ilk_display_fifo_size(dev);
-	unsigned int max;
 
 	/* if sprites aren't enabled, sprites get nothing */
 	if (is_sprite && !config->sprites_enabled)

@@ -1870,19 +1903,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 	}
 
 	/* clamp to max that the registers can hold */
-	if (INTEL_INFO(dev)->gen >= 8)
-		max = level == 0 ? 255 : 2047;
-	else if (INTEL_INFO(dev)->gen >= 7)
-		/* IVB/HSW primary/sprite plane watermarks */
-		max = level == 0 ? 127 : 1023;
-	else if (!is_sprite)
-		/* ILK/SNB primary plane watermarks */
-		max = level == 0 ? 127 : 511;
-	else
-		/* ILK/SNB sprite plane watermarks */
-		max = level == 0 ? 63 : 255;
-
-	return min(fifo_size, max);
+	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
 }
 
 /* Calculate the maximum cursor plane watermark */

@@ -1895,20 +1916,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 		return 64;
 
 	/* otherwise just report max that registers can hold */
-	if (INTEL_INFO(dev)->gen >= 7)
-		return level == 0 ? 63 : 255;
-	else
-		return level == 0 ? 31 : 63;
-}
-
-/* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
-{
-	/* max that registers can hold */
-	if (INTEL_INFO(dev)->gen >= 8)
-		return 31;
-	else
-		return 15;
+	return ilk_cursor_wm_reg_max(dev, level);
 }
 
 static void ilk_compute_wm_maximums(const struct drm_device *dev,
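The three *_wm_reg_max() helpers separate the per-generation register limits from the clamping policy in ilk_plane_wm_max(). The same lookup as a standalone function, with the generation number passed explicitly instead of read from drm_device; the values are copied from the hunk above:

#include <stdbool.h>
#include <stdio.h>

static unsigned int plane_wm_reg_max(int gen, int level, bool is_sprite)
{
	if (gen >= 8)		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (gen >= 7)	/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)	/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else			/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

int main(void)
{
	printf("IVB sprite LP1 max: %u\n", plane_wm_reg_max(7, 1, true));
	printf("SNB sprite LP1 max: %u\n", plane_wm_reg_max(6, 1, true));
	return 0;
}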
@@ -1920,7 +1928,7 @@ static void ilk_compute_wm_maximums(const struct drm_device *dev,
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
 	max->cur = ilk_cursor_wm_max(dev, level, config);
-	max->fbc = ilk_fbc_wm_max(dev);
+	max->fbc = ilk_fbc_wm_reg_max(dev);
 }
 
 static bool ilk_validate_wm_level(int level,

@@ -2115,38 +2123,52 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
-				      struct ilk_pipe_wm_parameters *p,
-				      struct intel_wm_config *config)
+				      struct ilk_pipe_wm_parameters *p)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct drm_plane *plane;
 
-	p->active = intel_crtc_active(crtc);
-	if (p->active) {
-		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
-		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
-		p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
-		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
-		p->cur.horiz_pixels = intel_crtc->cursor_width;
-		/* TODO: for now, assume primary and cursor planes are always enabled. */
-		p->pri.enabled = true;
-		p->cur.enabled = true;
-	}
+	if (!intel_crtc_active(crtc))
+		return;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		config->num_pipes_active += intel_crtc_active(crtc);
+	p->active = true;
+	p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
+	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
+	p->cur.bytes_per_pixel = 4;
+	p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
+	p->cur.horiz_pixels = intel_crtc->cursor_width;
+	/* TODO: for now, assume primary and cursor planes are always enabled. */
+	p->pri.enabled = true;
+	p->cur.enabled = true;
 
 	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
 
-		if (intel_plane->pipe == pipe)
+		if (intel_plane->pipe == pipe) {
 			p->spr = intel_plane->wm;
+			break;
+		}
+	}
+}
 
-		config->sprites_enabled |= intel_plane->wm.enabled;
-		config->sprites_scaled |= intel_plane->wm.scaled;
+static void ilk_compute_wm_config(struct drm_device *dev,
+				  struct intel_wm_config *config)
+{
+	struct intel_crtc *intel_crtc;
+
+	/* Compute the currently _active_ config */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
+
+		if (!wm->pipe_enabled)
+			continue;
+
+		config->sprites_enabled |= wm->sprites_enabled;
+		config->sprites_scaled |= wm->sprites_scaled;
+		config->num_pipes_active++;
 	}
 }
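With ilk_compute_wm_config() split out, the global watermark config is derived from already-computed per-pipe state in a separate pass instead of being rebuilt inside the per-crtc parameter walk. A standalone sketch of that accumulation over plain structs; the field names are simplified from the driver's:

#include <stdbool.h>
#include <stdio.h>

struct pipe_wm {
	bool pipe_enabled, sprites_enabled, sprites_scaled;
};

struct wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled, sprites_scaled;
};

/* Fold per-pipe watermark state into the device-wide config. */
static void compute_wm_config(const struct pipe_wm *wms, int n,
			      struct wm_config *config)
{
	for (int i = 0; i < n; i++) {
		if (!wms[i].pipe_enabled)
			continue;
		config->sprites_enabled |= wms[i].sprites_enabled;
		config->sprites_scaled |= wms[i].sprites_scaled;
		config->num_pipes_active++;
	}
}

int main(void)
{
	struct pipe_wm wms[3] = {
		{ true, true, false },
		{ false, false, false },	/* inactive pipe is skipped */
		{ true, false, false },
	};
	struct wm_config cfg = { 0 };

	compute_wm_config(wms, 3, &cfg);
	printf("active=%u sprites=%d\n", cfg.num_pipes_active,
	       (int)cfg.sprites_enabled);
	return 0;
}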
@@ -2169,6 +2191,10 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
 	/* LP0 watermarks always use 1/2 DDB partitioning */
 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
+	pipe_wm->pipe_enabled = params->active;
+	pipe_wm->sprites_enabled = params->spr.enabled;
+	pipe_wm->sprites_scaled = params->spr.scaled;
+
 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
 	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
 		max_level = 1;

@@ -2198,8 +2224,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
 	const struct intel_crtc *intel_crtc;
 
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
-		const struct intel_wm_level *wm =
-			&intel_crtc->wm.active.wm[level];
+		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
+		const struct intel_wm_level *wm = &active->wm[level];
+
+		if (!active->pipe_enabled)
+			continue;
 
 		if (!wm->enable)
 			return;

@@ -2558,7 +2587,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct intel_wm_config config = {};
 
-	ilk_compute_wm_parameters(crtc, &params, &config);
+	ilk_compute_wm_parameters(crtc, &params);
 
 	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

@@ -2567,6 +2596,8 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 
 	intel_crtc->wm.active = pipe_wm;
 
+	ilk_compute_wm_config(dev, &config);
+
 	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
 	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

@@ -2633,7 +2664,9 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
-	if (intel_crtc_active(crtc)) {
+	active->pipe_enabled = intel_crtc_active(crtc);
+
+	if (active->pipe_enabled) {
 		u32 tmp = hw->wm_pipe[pipe];
 
 		/*

@@ -2674,8 +2707,10 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
 
 	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+	if (INTEL_INFO(dev)->gen >= 7) {
+		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+	}
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?

@@ -3051,7 +3086,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	if (val != dev_priv->rps.cur_freq) {
 		gen6_set_rps_thresholds(dev_priv, val);
 
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			I915_WRITE(GEN6_RPNSWREQ,
 				   HSW_FREQUENCY(val));
 		else
@@ -3252,6 +3287,27 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
+{
+	/* All of these values are in units of 50MHz */
+	dev_priv->rps.cur_freq = 0;
+	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+	/* XXX: only BYT has a special efficient freq */
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+
+	/* Preserve min/max settings in case of re-init */
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+}
+
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
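parse_rp_state_cap() centralizes the RP_STATE_CAP decode that the gen6 and gen8 paths previously duplicated. The same decode as a standalone function; the example raw value is invented for the demonstration:

#include <stdint.h>
#include <stdio.h>

struct rps_caps {
	int rp0, rp1, rpn;	/* in units of 50 MHz, per the layout above */
};

/* RP0 in bits 7:0, RP1 in bits 15:8, RPn (min) in bits 23:16. */
static struct rps_caps parse_rp_state_cap(uint32_t cap)
{
	struct rps_caps c = {
		.rp0 = (cap >> 0) & 0xff,
		.rp1 = (cap >> 8) & 0xff,
		.rpn = (cap >> 16) & 0xff,
	};
	return c;
}

int main(void)
{
	struct rps_caps c = parse_rp_state_cap(0x00081216); /* made-up value */
	printf("RP0 %d MHz, RP1 %d MHz, RPn %d MHz\n",
	       c.rp0 * 50, c.rp1 * 50, c.rpn * 50);
	return 0;
}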
@@ -3270,6 +3326,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	parse_rp_state_cap(dev_priv, rp_state_cap);
 
 	/* 2b: Program RC6 thresholds.*/
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);

@@ -3289,8 +3346,10 @@ static void gen8_enable_rps(struct drm_device *dev)
 			rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
-	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
-	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+	I915_WRITE(GEN6_RPNSWREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
 	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

@@ -3356,23 +3415,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-	/* All of these values are in units of 50MHz */
-	dev_priv->rps.cur_freq = 0;
-	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
-	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
-	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
-	/* XXX: only BYT has a special efficient freq */
-	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	/* hw_max = RP0 until we check for overclocking */
-	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+	parse_rp_state_cap(dev_priv, rp_state_cap);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -4626,6 +4669,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:ilk */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	g4x_disable_trickle_feed(dev);
 
 	ibx_init_clock_gating(dev);

@@ -4701,6 +4747,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN6_GT_MODE,
 		   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:snb */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/*
 	 * BSpec recommends 8x4 when MSAA is used,
 	 * however in practice 16x4 seems fastest.

@@ -4940,6 +4989,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_FF_THREAD_MODE,
 		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
 
+	/* WaDisable_RenderCache_OperationalFlush:hsw */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* enable HiZ Raw Stall Optimization */
 	I915_WRITE(CACHE_MODE_0_GEN7,
 		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

@@ -4992,6 +5044,9 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:ivb */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

@@ -5086,6 +5141,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
+	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
+			 dev_priv->vlv_cdclk_freq);
+
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */

@@ -5103,6 +5162,9 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
 				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:vlv */
+	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	/* WaForceL3Serialization:vlv */
 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

@@ -5172,6 +5234,9 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
+	/* WaDisable_RenderCache_OperationalFlush:g4x */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+
 	g4x_disable_trickle_feed(dev);
 }

@@ -5186,6 +5251,9 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 	I915_WRITE16(DEUC, 0);
 	I915_WRITE(MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void broadwater_init_clock_gating(struct drm_device *dev)

@@ -5200,6 +5268,9 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(RENCLK_GATE_D2, 0);
 	I915_WRITE(MI_ARB_STATE,
 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+
+	/* WaDisable_RenderCache_OperationalFlush:gen4 */
+	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void gen3_init_clock_gating(struct drm_device *dev)
@@ -41,12 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 	return space;
 }
 
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+static bool intel_ring_stopped(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+}
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
 	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+	if (intel_ring_stopped(ring))
 		return;
 	ring->write_tail(ring, ring->tail);
 }

@@ -601,13 +605,15 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
+	/* WaEnableFlushTlbInvalidationMode:snb */
 	if (INTEL_INFO(dev)->gen == 6)
 		I915_WRITE(GFX_MODE,
-			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
+	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
 	if (IS_GEN7(dev))
 		I915_WRITE(GFX_MODE_GEN7,
-			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
 	if (INTEL_INFO(dev)->gen >= 5) {

@@ -624,13 +630,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 		 */
 		I915_WRITE(CACHE_MODE_0,
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
-		/* This is not explicitly set for GEN6, so read the register.
-		 * see intel_ring_mi_set_context() for why we care.
-		 * TODO: consider explicitly setting the bit for GEN5
-		 */
-		ring->itlb_before_ctx_switch =
-			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6)

@@ -153,10 +153,6 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 
-	/**
-	 * Do an explicit TLB flush before MI_SET_CONTEXT
-	 */
-	bool itlb_before_ctx_switch;
 	struct i915_hw_context *default_context;
 	struct i915_hw_context *last_context;
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 	if (ret < 0)
 		goto err1;
 
-	ret = sysfs_create_link(&encoder->ddc.dev.kobj,
-				&drm_connector->kdev->kobj,
+	ret = sysfs_create_link(&drm_connector->kdev->kobj,
+				&encoder->ddc.dev.kobj,
 				encoder->ddc.dev.kobj.name);
 	if (ret < 0)
 		goto err2;
@@ -182,6 +182,14 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
 			DPIO_OPCODE_REG_READ, reg, &val);
 
+	/*
+	 * FIXME: There might be some registers where all 1's is a valid value,
+	 * so ideally we should check the register offset instead...
+	 */
+	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
+	     pipe_name(pipe), reg, val);
+
 	return val;
 }
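A sideband read that comes back as all 1's usually means the target was powered down or the transaction failed, so the value doubles as a cheap health check; as the FIXME says, 0xffffffff could in principle be legitimate for some registers. A userspace sketch of the same check:

#include <stdint.h>
#include <stdio.h>

/* Flag suspicious all-ones reads, but still return the raw value. */
static uint32_t checked_dpio_read(uint32_t raw, int reg)
{
	if (raw == 0xffffffff)
		fprintf(stderr, "DPIO read reg 0x%x == 0x%x\n",
			reg, (unsigned)raw);
	return raw;
}

int main(void)
{
	checked_dpio_read(0x00400888, 0x8200);	/* plausible value: silent */
	checked_dpio_read(0xffffffff, 0x8200);	/* warns */
	return 0;
}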
@@ -253,8 +253,7 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
 
 }
 
-void vlv_force_wake_get(struct drm_i915_private *dev_priv,
-			int fw_engine)
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;

@@ -273,8 +272,7 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void vlv_force_wake_put(struct drm_i915_private *dev_priv,
-			int fw_engine)
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;

@@ -486,6 +484,17 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	((reg) < 0x40000 && (reg) != FORCEWAKE)
 
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+	(((reg) >= 0x2000 && (reg) < 0x4000) || \
+	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
+	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
+	 ((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+	(((reg) >= 0x12000 && (reg) < 0x14000) || \
+	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
+	 ((reg) >= 0x30000 && (reg) < 0x40000))
+
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
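The two range macros classify a register offset into the render or media forcewake well on VLV. They are plain integer expressions, so they can be exercised verbatim in a userspace harness:

#include <stdio.h>

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) || \
	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
	 ((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(((reg) >= 0x12000 && (reg) < 0x14000) || \
	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
	 ((reg) >= 0x30000 && (reg) < 0x40000))

int main(void)
{
	unsigned int regs[] = { 0x2030, 0x12080, 0x9400 };

	for (int i = 0; i < 3; i++)
		printf("0x%05x render=%d media=%d\n", regs[i],
		       (int)FORCEWAKE_VLV_RENDER_RANGE_OFFSET(regs[i]),
		       (int)FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(regs[i]));
	return 0;
}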
@@ -852,12 +861,15 @@ void intel_uncore_fini(struct drm_device *dev)
 	intel_uncore_forcewake_reset(dev, false);
 }
 
+#define GEN_RANGE(l, h) GENMASK(h, l)
+
 static const struct register_whitelist {
 	uint64_t offset;
 	uint32_t size;
-	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+	uint32_t gen_bitmask;
 } whitelist[] = {
-	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
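GEN_RANGE(l, h) is just GENMASK with the arguments flipped, so a whitelist entry reads as (first supported gen, last supported gen). A userspace rendition, with a local GENMASK standing in for the kernel's, showing it reproduces the 0x1F0 literal it replaces:

#include <stdio.h>

/* 32-bit stand-in for the kernel's GENMASK(h, l): bits l..h set. */
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define GEN_RANGE(l, h) GENMASK(h, l)

int main(void)
{
	/* gens 4..8 -> bits 4..8 -> 0x1F0 */
	printf("GEN_RANGE(4, 8) = 0x%X\n", GEN_RANGE(4, 8));
	return 0;
}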
@@ -1310,7 +1310,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 void drm_clflush_sg(struct sg_table *st);
-void drm_clflush_virt_range(char *addr, unsigned long length);
+void drm_clflush_virt_range(void *addr, unsigned long length);
 
 /* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct drm_device *dev, void *data,
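Because drm_clflush_virt_range() now takes void *, callers no longer need casts for non-char buffers. A userspace analogue of the flush loop using SSE2 intrinsics, assuming x86 and a fixed 64-byte line where the kernel instead consults boot_cpu_data.x86_clflush_size:

#include <emmintrin.h>	/* _mm_clflush, _mm_mfence (SSE2) */

static void clflush_virt_range(void *addr, unsigned long length)
{
	const unsigned long line = 64;	/* assumed cache-line size */
	char *p = addr;	/* void * converts implicitly: no casts at call sites */
	char *end = p + length;

	_mm_mfence();
	for (; p < end; p += line)
		_mm_clflush(p);
	_mm_mfence();
}

int main(void)
{
	static char buf[4096];

	clflush_virt_range(buf, sizeof(buf));
	return 0;
}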
@@ -337,6 +337,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
 #define I915_PARAM_HAS_EXEC_HANDLE_LUT	 26
 #define I915_PARAM_HAS_WT		 27
+#define I915_PARAM_CMD_PARSER_VERSION	 28
 
 typedef struct drm_i915_getparam {
 	int param;
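I915_PARAM_CMD_PARSER_VERSION lets userspace ask whether, and at what version, the command parser is present, via the existing GETPARAM ioctl. A minimal probe sketch; it assumes kernel uapi DRM headers are installed and that /dev/dri/card0 is an i915 device:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

#ifndef I915_PARAM_CMD_PARSER_VERSION
#define I915_PARAM_CMD_PARSER_VERSION 28	/* value from the hunk above */
#endif

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("cmd parser version: %d\n", version);
	else
		perror("GETPARAM");	/* older kernels reject the param */
	close(fd);
	return 0;
}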